// SPDX-License-Identifier: GPL-2.0
/*
 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
 *
 * Copyright (C) 2000-2002 Lineo
 *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
 * Copyright (C) 2002 Toshiba Corporation
 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
 */

/*
 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
 *
 *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
 *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
 *  - Gadget drivers can choose direction (IN, OUT)
 *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
 */

// #define	VERBOSE		/* extra debug messages (success too) */
// #define	USB_TRACE	/* packet-level success messages */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>


#include "goku_udc.h"

#define	DRIVER_DESC		"TC86C001 USB Device Controller"
#define	DRIVER_VERSION		"30-Oct 2003"

static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;

MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");


/*
 * IN dma behaves ok under testing, though the IN-dma abort paths don't
 * seem to behave quite as expected.  Used by default.
 *
 * OUT dma documents design problems handling the common "short packet"
 * transfer termination policy; it couldn't be enabled by default, even
 * if the OUT-dma abort problems had a resolution.
 */
static unsigned use_dma = 1;

#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
 *	0 to disable dma
 *	1 to use IN dma only (normal operation)
 *	2 to use IN and OUT dma
 */
module_param(use_dma, uint, S_IRUGO);
#endif

/*-------------------------------------------------------------------------*/

static void nuke(struct goku_ep *, int status);

static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}

static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->num != usb_endpoint_num(desc))
		return -EINVAL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = get_unaligned_le16(&desc->wMaxPacketSize);
	switch (max) {
	case 64:
		mode++;
		fallthrough;
	case 32:
		mode++;
		fallthrough;
	case 16:
		mode++;
		fallthrough;
	case 8:
		mode <<= 3;
		break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */

	/* ep1/ep2 dma direction is chosen early; it works in the other
	 * direction, with pio.  be cautious with out-dma.
	 */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10	/* double buffered */
				: 0x11	/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}
	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->ep.desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}

static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		readl(&regs->int_enable);
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32	tmp;

			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}

static int goku_ep_disable(struct usb_ep *_ep)
{
	struct goku_ep	*ep;
	struct goku_udc	*dev;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;
	dev = ep->dev;
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "disable %s\n", _ep->name);

	spin_lock_irqsave(&dev->lock, flags);
	nuke(ep, -ESHUTDOWN);
	ep_reset(dev->regs, ep);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct goku_request	*req;

	if (!_ep)
		return NULL;
	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}

static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct goku_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

/*-------------------------------------------------------------------------*/

static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;

	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
	unsigned	length, count;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	count = length;
	while (likely(count--))
		writel(*buf++, fifo);
	return length;
}

// return:  0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc	*dev = ep->dev;
	u32		tmp;
	u8		*buf;
	unsigned	count;
	int		is_last;

	tmp = readl(&dev->regs->DataSet);
	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	dev = ep->dev;
	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
		return -EL2HLT;

	/* NOTE:  just single-buffered PIO-IN for now.  */
	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
		return 0;

	/* clear our "packet available" irq */
	if (ep->num != 0)
		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);

	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);

	/* last packet often short (sometimes a zlp, especially on ep0) */
	if (unlikely(count != ep->ep.maxpacket)) {
		writel(~(1<<ep->num), &dev->regs->EOP);
		if (ep->num == 0) {
			dev->ep[0].stopped = 1;
			dev->ep0state = EP0_STATUS;
		}
		is_last = 1;
	} else {
		if (likely(req->req.length != req->req.actual)
				|| req->req.zero)
			is_last = 0;
		else
			is_last = 1;
	}
#if 0		/* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
		ep->ep.name, count, is_last ? "/last" : "",
		req->req.length - req->req.actual, req);
#endif

	/* requests complete when all IN data is in the FIFO,
	 * or sometimes later, if a zlp was needed.
	 */
	if (is_last) {
		done(ep, req, 0);
		return 1;
	}

	return 0;
}

static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}

static inline void
pio_irq_enable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable |= INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

static inline void
pio_irq_disable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable &= ~INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

static inline void
pio_advance(struct goku_ep *ep)
{
	struct goku_request	*req;

	if (unlikely(list_empty (&ep->queue)))
		return;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(ep->is_in ? write_fifo : read_fifo)(ep, req);
}


/*-------------------------------------------------------------------------*/

// return:  0 = q running, 1 = q stopped, negative = errno
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master |= MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master |= MST_RD_ENA | MST_EOPB_ENA;
		else
			master |= MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;

		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}

static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}

static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	struct goku_request		*req;
	u32				curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);
	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* FIXME using these resets isn't usably documented. this may
	 * not work unless it's followed by disabling the endpoint.
	 *
	 * FIXME the OUT reset path doesn't even behave consistently.
	 */
	if (ep->is_in) {
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);

	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}

/*-------------------------------------------------------------------------*/

static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, &req->req,
				ep->is_in);
		if (status)
			return status;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		if (unlikely(status != 0)) {
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */
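
	/* note: if the kickstart above already finished the request
	 * (write_fifo/read_fifo returned 1, mapped to status 0) or it
	 * failed outright, req was set to NULL so it is not queued below.
	 */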

	if (likely(req != NULL))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}

/* dequeue ALL requests */
static void nuke(struct goku_ep *ep, int status)
{
	struct goku_request	*req;

	ep->stopped = 1;
	if (list_empty(&ep->queue))
		return;
	if (ep->dma)
		abort_dma(ep, status);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct goku_request, queue);
		done(ep, req, status);
	}
}

/* dequeue JUST ONE request */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req = NULL, *iter;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* we can't touch (dma) registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		_req);

	spin_lock_irqsave(&dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore (&dev->lock, flags);
		return -EINVAL;
	}

	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
		abort_dma(ep, -ECONNRESET);
		done(ep, req, -ECONNRESET);
		dma_advance(dev, ep);
	} else if (!list_empty(&req->queue))
		done(ep, req, -ECONNRESET);
	else
		req = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return req ? 0 : -EOPNOTSUPP;
}

/*-------------------------------------------------------------------------*/

static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
	if (ep->stopped) {
		ep->stopped = 0;
		if (ep->dma) {
			struct goku_request	*req;

			if (list_empty(&ep->queue))
				return;
			req = list_entry(ep->queue.next, struct goku_request,
						queue);
			(void) start_dma(ep, req);
		} else
			pio_advance(ep);
	}
}

static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep	*ep;
	unsigned long	flags;
	int		retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->ep.desc) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
				& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		readl(ep->reg_status);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}

static int goku_fifo_status(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct goku_ep, ep);

	/* size is only reported sanely for OUT */
	if (ep->is_in)
		return -EOPNOTSUPP;

	/* ignores 16-byte dma buffer; SizeH == 0 */
	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
	return size;
}

static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->ep.desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Non-desirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for in, no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}

static const struct usb_ep_ops goku_ep_ops = {
	.enable		= goku_ep_enable,
	.disable	= goku_ep_disable,

	.alloc_request	= goku_alloc_request,
	.free_request	= goku_free_request,

	.queue		= goku_queue,
	.dequeue	= goku_dequeue,

	.set_halt	= goku_set_halt,
	.fifo_status	= goku_fifo_status,
	.fifo_flush	= goku_fifo_flush,
};

/*-------------------------------------------------------------------------*/

static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}

static struct usb_ep *goku_match_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct goku_udc	*dev = to_goku_udc(g);
	struct usb_ep *ep;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_INT:
		/* single buffering is enough */
		ep = &dev->ep[3].ep;
		if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
			return ep;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (usb_endpoint_dir_in(desc)) {
			/* DMA may be available */
			ep = &dev->ep[2].ep;
			if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
				return ep;
		}
		break;
	default:
		/* nothing */ ;
	}

	return NULL;
}

static int goku_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int goku_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops goku_ops = {
	.get_frame	= goku_get_frame,
	.udc_start	= goku_udc_start,
	.udc_stop	= goku_udc_stop,
	.match_ep	= goku_match_ep,
	// no remote wakeup
	// not selfpowered
};

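/* Illustrative only: one way a gadget driver's endpoint autoconfiguration
 * can reach goku_match_ep() above.  usb_ep_autoconfig() consults the
 * gadget's .match_ep hook before falling back to generic matching, so a
 * full-speed bulk-IN descriptor would normally land on the DMA-capable IN
 * endpoint.  my_bulk_in_desc and my_bind_example() are hypothetical names.
 */
#if 0
static struct usb_endpoint_descriptor my_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	/* wMaxPacketSize is filled in by autoconfig (8/16/32/64 here) */
};

static int my_bind_example(struct usb_gadget *gadget)
{
	struct usb_ep	*ep;

	ep = usb_ep_autoconfig(gadget, &my_bulk_in_desc);
	if (!ep)
		return -ENODEV;
	/* ep->name is "ep2-bulk" when goku_match_ep() accepts the request */
	return 0;
}
#endif
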
/*-------------------------------------------------------------------------*/

static inline const char *dmastr(void)
{
	if (use_dma == 0)
		return "(dma disabled)";
	else if (use_dma == 2)
		return "(dma IN and OUT)";
	else
		return "(dma IN)";
}

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static const char proc_node_name [] = "driver/udc";

#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS

static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
{
	/* int_status is the same format ... */
	seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		   label, mask,
		   (mask & INT_PWRDETECT) ? " power" : "",
		   (mask & INT_SYSERROR) ? " sys" : "",
		   (mask & INT_MSTRDEND) ? " in-dma" : "",
		   (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

		   (mask & INT_MSTWREND) ? " out-dma" : "",
		   (mask & INT_MSTWRSET) ? " wrset" : "",
		   (mask & INT_ERR) ? " err" : "",
		   (mask & INT_SOF) ? " sof" : "",

		   (mask & INT_EP3NAK) ? " ep3nak" : "",
		   (mask & INT_EP2NAK) ? " ep2nak" : "",
		   (mask & INT_EP1NAK) ? " ep1nak" : "",
		   (mask & INT_EP3DATASET) ? " ep3" : "",

		   (mask & INT_EP2DATASET) ? " ep2" : "",
		   (mask & INT_EP1DATASET) ? " ep1" : "",
		   (mask & INT_STATUSNAK) ? " ep0snak" : "",
		   (mask & INT_STATUS) ? " ep0status" : "",

		   (mask & INT_SETUP) ? " setup" : "",
		   (mask & INT_ENDPOINT0) ? " ep0" : "",
		   (mask & INT_USBRESET) ? " reset" : "",
		   (mask & INT_SUSPEND) ? " suspend" : "");
}

static const char *udc_ep_state(enum ep0state state)
{
	switch (state) {
	case EP0_DISCONNECT:
		return "ep0_disconnect";
	case EP0_IDLE:
		return "ep0_idle";
	case EP0_IN:
		return "ep0_in";
	case EP0_OUT:
		return "ep0_out";
	case EP0_STATUS:
		return "ep0_status";
	case EP0_STALL:
		return "ep0_stall";
	case EP0_SUSPEND:
		return "ep0_suspend";
	}

	return "ep0_?";
}

static const char *udc_ep_status(u32 status)
{
	switch (status & EPxSTATUS_EP_MASK) {
	case EPxSTATUS_EP_READY:
		return "ready";
	case EPxSTATUS_EP_DATAIN:
		return "packet";
	case EPxSTATUS_EP_FULL:
		return "full";
	case EPxSTATUS_EP_TX_ERR:	/* host will retry */
		return "tx_err";
	case EPxSTATUS_EP_RX_ERR:
		return "rx_err";
	case EPxSTATUS_EP_BUSY:		/* ep0 only */
		return "busy";
	case EPxSTATUS_EP_STALL:
		return "stall";
	case EPxSTATUS_EP_INVALID:	/* these "can't happen" */
		return "invalid";
	}

	return "?";
}

static int udc_proc_read(struct seq_file *m, void *v)
{
	struct goku_udc			*dev = m->private;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned long			flags;
	int				i, is_usb_connected;
	u32				tmp;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	seq_printf(m,
		   "%s - %s\n"
		   "%s version: %s %s\n"
		   "Gadget driver: %s\n"
		   "Host %s, %s\n"
		   "\n",
		   pci_name(dev->pdev), driver_desc,
		   driver_name, DRIVER_VERSION, dmastr(),
		   dev->driver ? dev->driver->driver.name : "(none)",
		   is_usb_connected
			   ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			   : "disconnected",
		   udc_ep_state(dev->ep0state));

	dump_intmask(m, "int_status", readl(&regs->int_status));
	dump_intmask(m, "int_enable", readl(&regs->int_enable));

	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n",
		   dev->irqs, readl(&regs->DataSet),
		   readl(&regs->EPxSingle), readl(&regs->EPxBCS),
		   readl(&regs->UsbState),
		   readl(&regs->address));
	if (seq_has_overflowed(m))
		goto done;

	tmp = readl(&regs->dma_master);
	seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n",
		   tmp,
		   (tmp & MST_EOPB_DIS) ? " eopb-" : "",
		   (tmp & MST_EOPB_ENA) ? " eopb+" : "",
		   (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		   (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

		   (tmp & MST_RD_EOPB) ? " eopb" : "",
		   (tmp & MST_RD_RESET) ? " in_reset" : "",
		   (tmp & MST_WR_RESET) ? " out_reset" : "",
		   (tmp & MST_RD_ENA) ? " IN" : "",

		   (tmp & MST_WR_ENA) ? " OUT" : "",
		   (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in");
	if (seq_has_overflowed(m))
		goto done;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep		*ep = &dev->ep [i];
		struct goku_request	*req;

		if (i && !ep->ep.desc)
			continue;

		tmp = readl(ep->reg_status);
		seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n",
			   ep->ep.name,
			   ep->is_in ? "in" : "out",
			   ep->ep.maxpacket,
			   ep->dma ? "dma" : "pio",
			   ep->irqs,
			   tmp, udc_ep_status(tmp),
			   (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			   (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			   (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			   (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : "");
		if (seq_has_overflowed(m))
			goto done;

		if (list_empty(&ep->queue)) {
			seq_puts(m, "\t(nothing queued)\n");
			if (seq_has_overflowed(m))
				goto done;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			seq_printf(m, "\treq %p len %u/%u buf %p\n",
				   &req->req, tmp, req->req.length,
				   req->req.buf);
			if (seq_has_overflowed(m))
				goto done;
		}
	}

done:
	local_irq_restore(flags);
	return 0;
}
#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/*-------------------------------------------------------------------------*/

static void udc_reinit (struct goku_udc *dev)
{
	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };

	unsigned i;

	INIT_LIST_HEAD (&dev->gadget.ep_list);
	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->ep0state = EP0_DISCONNECT;
	dev->irqs = 0;

	for (i = 0; i < 4; i++) {
		struct goku_ep	*ep = &dev->ep[i];

		ep->num = i;
		ep->ep.name = names[i];
		ep->reg_fifo = &dev->regs->ep_fifo [i];
		ep->reg_status = &dev->regs->ep_status [i];
		ep->reg_mode = &dev->regs->ep_mode[i];

		ep->ep.ops = &goku_ep_ops;
		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
		ep->dev = dev;
		INIT_LIST_HEAD (&ep->queue);

		ep_reset(NULL, ep);

		if (i == 0)
			ep->ep.caps.type_control = true;
		else
			ep->ep.caps.type_bulk = true;

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}

	dev->ep[0].reg_mode = NULL;
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
	list_del_init (&dev->ep[0].ep.ep_list);
}

static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;

	writel(0, &regs->power_detect);
	writel(0, &regs->int_enable);
	readl(&regs->int_enable);
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);
}

static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned			i;

	VDBG(dev, "%s\n", __func__);

	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel(	G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}

static void udc_enable(struct goku_udc *dev)
{
	/* start enumeration now, or after power detect irq */
	if (readl(&dev->regs->power_detect) & PW_DETECT)
		ep0_start(dev);
	else {
		DBG(dev, "%s\n", __func__);
		dev->int_enable = INT_PWRDETECT;
		writel(dev->int_enable, &dev->regs->int_enable);
	}
}

/*-------------------------------------------------------------------------*/

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 */

/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int goku_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct goku_udc	*dev = to_goku_udc(g);

	/* hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/*
	 * then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	udc_enable(dev);

	return 0;
}

static void stop_activity(struct goku_udc *dev)
{
	unsigned	i;

	DBG (dev, "%s\n", __func__);

	/* disconnect gadget driver after quiesceing hw and the driver */
	udc_reset (dev);
	for (i = 0; i < 4; i++)
		nuke(&dev->ep [i], -ESHUTDOWN);

	if (dev->driver)
		udc_enable(dev);
}

static int goku_udc_stop(struct usb_gadget *g)
{
	struct goku_udc	*dev = to_goku_udc(g);
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->driver = NULL;
	stop_activity(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static void ep0_setup(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct usb_ctrlrequest		ctrl;
	int				tmp;

	/* read SETUP packet and enter DATA stage */
	ctrl.bRequestType = readl(&regs->bRequestType);
	ctrl.bRequest = readl(&regs->bRequest);
	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
					| readl(&regs->wValueL));
	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
					| readl(&regs->wIndexL));
	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
					| readl(&regs->wLengthL));
	writel(0, &regs->SetupRecv);

	nuke(&dev->ep[0], 0);
	dev->ep[0].stopped = 0;
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		dev->ep[0].is_in = 1;
		dev->ep0state = EP0_IN;
		/* detect early status stages */
		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
	} else {
		dev->ep[0].is_in = 0;
		dev->ep0state = EP0_OUT;

		/* NOTE:  CLEAR_FEATURE is done in software so that we can
		 * synchronize transfer restarts after bulk IN stalls.  data
		 * won't even enter the fifo until the halt is cleared.
		 */
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			switch (ctrl.bRequestType) {
			case USB_RECIP_ENDPOINT:
				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
				/* active endpoint */
				if (tmp > 3 ||
				    (!dev->ep[tmp].ep.desc && tmp != 0))
					goto stall;
				if (ctrl.wIndex & cpu_to_le16(
						USB_DIR_IN)) {
					if (!dev->ep[tmp].is_in)
						goto stall;
				} else {
					if (dev->ep[tmp].is_in)
						goto stall;
				}
				if (ctrl.wValue != cpu_to_le16(
						USB_ENDPOINT_HALT))
					goto stall;
				if (tmp)
					goku_clear_halt(&dev->ep[tmp]);
succeed:
				/* start ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				dev->ep[0].stopped = 1;
				dev->ep0state = EP0_STATUS;
				return;
			case USB_RECIP_DEVICE:
				/* device remote wakeup: always clear */
				if (ctrl.wValue != cpu_to_le16(1))
					goto stall;
				VDBG(dev, "clear dev remote wakeup\n");
				goto succeed;
			case USB_RECIP_INTERFACE:
				goto stall;
			default:		/* pass to gadget driver */
				break;
			}
			break;
		default:
			break;
		}
	}

#ifdef USB_TRACE
	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
		le16_to_cpu(ctrl.wLength));
#endif

	/* hw wants to know when we're configured (or not) */
	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
				&& ctrl.bRequestType == USB_RECIP_DEVICE);
	if (unlikely(dev->req_config))
		dev->configured = (ctrl.wValue != cpu_to_le16(0));

	/* delegate everything to the gadget driver.
	 * it may respond after this irq handler returns.
	 */
	spin_unlock (&dev->lock);
	tmp = dev->driver->setup(&dev->gadget, &ctrl);
	spin_lock (&dev->lock);
	if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
				ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
		command(regs, COMMAND_STALL, 0);
		dev->ep[0].stopped = 1;
		dev->ep0state = EP0_STALL;
	}

	/* expect at least one data or status stage irq */
}

#define ACK(irqbit) { \
	stat &= ~irqbit; \
	writel(~irqbit, &regs->int_status); \
	handled = 1; \
	}

static irqreturn_t goku_irq(int irq, void *_dev)
{
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct goku_ep			*ep;
	u32				stat, handled = 0;
	unsigned			i, rescans = 5;

	spin_lock(&dev->lock);

rescan:
	stat = readl(&regs->int_status) & dev->int_enable;
	if (!stat)
		goto done;
	dev->irqs++;

	/* device-wide irqs */
	if (unlikely(stat & INT_DEVWIDE)) {
		if (stat & INT_SYSERROR) {
			ERROR(dev, "system error\n");
			stop_activity(dev);
			stat = 0;
			handled = 1;
			// FIXME have a neater way to prevent re-enumeration
			dev->driver = NULL;
			goto done;
		}
		if (stat & INT_PWRDETECT) {
			writel(~stat, &regs->int_status);
			if (readl(&dev->regs->power_detect) & PW_DETECT) {
				VDBG(dev, "connect\n");
				ep0_start(dev);
			} else {
				DBG(dev, "disconnect\n");
				if (dev->gadget.speed == USB_SPEED_FULL)
					stop_activity(dev);
				dev->ep0state = EP0_DISCONNECT;
				dev->int_enable = INT_DEVWIDE;
				writel(dev->int_enable, &dev->regs->int_enable);
			}
			stat = 0;
			handled = 1;
			goto done;
		}
		if (stat & INT_SUSPEND) {
			ACK(INT_SUSPEND);
			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
				switch (dev->ep0state) {
				case EP0_DISCONNECT:
				case EP0_SUSPEND:
					goto pm_next;
				default:
					break;
				}
				DBG(dev, "USB suspend\n");
				dev->ep0state = EP0_SUSPEND;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->suspend) {
					spin_unlock(&dev->lock);
					dev->driver->suspend(&dev->gadget);
					spin_lock(&dev->lock);
				}
			} else {
				if (dev->ep0state != EP0_SUSPEND) {
					DBG(dev, "bogus USB resume %d\n",
						dev->ep0state);
					goto pm_next;
				}
				DBG(dev, "USB resume\n");
				dev->ep0state = EP0_IDLE;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->resume) {
					spin_unlock(&dev->lock);
					dev->driver->resume(&dev->gadget);
					spin_lock(&dev->lock);
				}
			}
		}
pm_next:
		if (stat & INT_USBRESET) {		/* hub reset done */
			ACK(INT_USBRESET);
			INFO(dev, "USB reset done, gadget %s\n",
				dev->driver->driver.name);
		}
		// and INT_ERR on some endpoint's crc/bitstuff/... problem
	}

	/* progress ep0 setup, data, or status stages.
	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
	 */
	if (stat & INT_SETUP) {
		ACK(INT_SETUP);
		dev->ep[0].irqs++;
		ep0_setup(dev);
	}
	if (stat & INT_STATUSNAK) {
		ACK(INT_STATUSNAK|INT_ENDPOINT0);
		if (dev->ep0state == EP0_IN) {
			ep = &dev->ep[0];
			ep->irqs++;
			nuke(ep, 0);
			writel(~(1<<0), &regs->EOP);
			dev->ep0state = EP0_STATUS;
		}
	}
	if (stat & INT_ENDPOINT0) {
		ACK(INT_ENDPOINT0);
		ep = &dev->ep[0];
		ep->irqs++;
		pio_advance(ep);
	}

	/* dma completion */
	if (stat & INT_MSTRDEND) {	/* IN */
		ACK(INT_MSTRDEND);
		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWREND) {	/* OUT */
		ACK(INT_MSTWREND);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWRTMOUT) {	/* OUT */
		ACK(INT_MSTWRTMOUT);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		ERROR(dev, "%s write timeout ?\n", ep->ep.name);
		// reset dma? then dma_advance()
	}

	/* pio */
	for (i = 1; i < 4; i++) {
		u32		tmp = INT_EPxDATASET(i);

		if (!(stat & tmp))
			continue;
		ep = &dev->ep[i];
		pio_advance(ep);
		if (list_empty (&ep->queue))
			pio_irq_disable(dev, regs, i);
		stat &= ~tmp;
		handled = 1;
		ep->irqs++;
	}

	if (rescans--)
		goto rescan;

done:
	(void)readl(&regs->int_enable);
	spin_unlock(&dev->lock);
	if (stat)
		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
				readl(&regs->int_status), dev->int_enable);
	return IRQ_RETVAL(handled);
}

#undef ACK

/*-------------------------------------------------------------------------*/

static void gadget_release(struct device *_dev)
{
	struct goku_udc	*dev = dev_get_drvdata(_dev);

	kfree(dev);
}

/* tear down the binding between this driver and the pci device */

static void goku_remove(struct pci_dev *pdev)
{
	struct goku_udc		*dev = pci_get_drvdata(pdev);

	DBG(dev, "%s\n", __func__);

	usb_del_gadget_udc(&dev->gadget);

	BUG_ON(dev->driver);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	remove_proc_entry(proc_node_name, NULL);
#endif
	if (dev->regs)
		udc_reset(dev);
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->got_region)
		release_mem_region(pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device(pdev);

	dev->regs = NULL;

	INFO(dev, "unbind\n");
}

/* wrap this driver around the specified pci device, but
 * don't respond over USB until a gadget driver binds to us.
 */

static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct goku_udc		*dev = NULL;
	unsigned long		resource, len;
	void __iomem		*base = NULL;
	int			retval;

	if (!pdev->irq) {
		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
		retval = -ENODEV;
		goto err;
	}

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto err;
	}

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &goku_ops;
	dev->gadget.max_speed = USB_SPEED_FULL;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	retval = pci_enable_device(pdev);
	if (retval < 0) {
		DBG(dev, "can't enable, %d\n", retval);
		goto err;
	}
	dev->enabled = 1;

	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		DBG(dev, "controller already in use\n");
		retval = -EBUSY;
		goto err;
	}
	dev->got_region = 1;

	base = ioremap(resource, len);
	if (base == NULL) {
		DBG(dev, "can't map memory\n");
		retval = -EFAULT;
		goto err;
	}
	dev->regs = (struct goku_udc_regs __iomem *) base;

	INFO(dev, "%s\n", driver_desc);
	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);

	/* init to known state, then setup irqs */
	udc_reset(dev);
	udc_reinit (dev);
	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
			driver_name, dev) != 0) {
		DBG(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto err;
	}
	dev->got_irq = 1;
	if (use_dma)
		pci_set_master(pdev);


#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	proc_create_single_data(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto err;

	return 0;

err:
	if (dev)
		goku_remove (pdev);
	/* gadget_release is not registered yet, kfree explicitly */
	kfree(dev);
	return retval;
}


/*-------------------------------------------------------------------------*/

static const struct pci_device_id pci_ids[] = { {
	.class		= PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask	= ~0,
	.vendor		= 0x102f,		/* Toshiba */
	.device		= 0x0107,		/* this UDC */
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);

static struct pci_driver goku_pci_driver = {
	.name		= driver_name,
	.id_table	= pci_ids,

	.probe		= goku_probe,
	.remove		= goku_remove,

	/* FIXME add power management support */
};

module_pci_driver(goku_pci_driver);
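
/* Typical use (illustrative): this controller binds like any other UDC, e.g.
 *
 *	modprobe goku_udc
 *	modprobe g_ether	# or another gadget function driver
 *
 * The UDC core then calls goku_udc_start(), the pullup is enabled once power
 * is detected, and the device enumerates at full speed.
 */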