/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default.
 *
 * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
 * be enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
 *
 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
 *	with 2282 chip
 *
 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
 *	with usb 338x chip. Based on PLX driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>

#define	DRIVER_DESC	"PLX NET228x/USB338x USB Peripheral Controller"
#define	DRIVER_VERSION	"2005 Sept 27/v3.0"

#define	EP_DONTUSE	13	/* nonzero */

#define USE_RDK_LEDS	/* GPIO pins control three LEDs */


static const char driver_name[] = "net2280";
static const char driver_desc[] = DRIVER_DESC;

static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c", "ep-d",
	"ep-e", "ep-f", "ep-g", "ep-h",
};

/* Endpoint names for usb3380 advanced mode */
static const char *const ep_name_adv[] = {
	ep0name,
	"ep1in", "ep2out", "ep3in", "ep4out",
	"ep1out", "ep2in", "ep3out", "ep4in",
};

/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode;

/* "modprobe net2280 fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);

#define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
"in" : "out") 110 111 static char *type_string(u8 bmAttributes) 112 { 113 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 114 case USB_ENDPOINT_XFER_BULK: return "bulk"; 115 case USB_ENDPOINT_XFER_ISOC: return "iso"; 116 case USB_ENDPOINT_XFER_INT: return "intr"; 117 } 118 return "control"; 119 } 120 121 #include "net2280.h" 122 123 #define valid_bit cpu_to_le32(BIT(VALID_BIT)) 124 #define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE)) 125 126 /*-------------------------------------------------------------------------*/ 127 static inline void enable_pciirqenb(struct net2280_ep *ep) 128 { 129 u32 tmp = readl(&ep->dev->regs->pciirqenb0); 130 131 if (ep->dev->quirks & PLX_LEGACY) 132 tmp |= BIT(ep->num); 133 else 134 tmp |= BIT(ep_bit[ep->num]); 135 writel(tmp, &ep->dev->regs->pciirqenb0); 136 137 return; 138 } 139 140 static int 141 net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 142 { 143 struct net2280 *dev; 144 struct net2280_ep *ep; 145 u32 max, tmp; 146 unsigned long flags; 147 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 }; 148 int ret = 0; 149 150 ep = container_of(_ep, struct net2280_ep, ep); 151 if (!_ep || !desc || ep->desc || _ep->name == ep0name || 152 desc->bDescriptorType != USB_DT_ENDPOINT) { 153 pr_err("%s: failed at line=%d\n", __func__, __LINE__); 154 return -EINVAL; 155 } 156 dev = ep->dev; 157 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { 158 ret = -ESHUTDOWN; 159 goto print_err; 160 } 161 162 /* erratum 0119 workaround ties up an endpoint number */ 163 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) { 164 ret = -EDOM; 165 goto print_err; 166 } 167 168 if (dev->quirks & PLX_SUPERSPEED) { 169 if ((desc->bEndpointAddress & 0x0f) >= 0x0c) { 170 ret = -EDOM; 171 goto print_err; 172 } 173 ep->is_in = !!usb_endpoint_dir_in(desc); 174 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) { 175 ret = -EINVAL; 176 goto print_err; 177 } 178 } 179 180 /* sanity check ep-e/ep-f since their fifos are small */ 181 max = usb_endpoint_maxp(desc) & 0x1fff; 182 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) { 183 ret = -ERANGE; 184 goto print_err; 185 } 186 187 spin_lock_irqsave(&dev->lock, flags); 188 _ep->maxpacket = max & 0x7ff; 189 ep->desc = desc; 190 191 /* ep_reset() has already been called */ 192 ep->stopped = 0; 193 ep->wedged = 0; 194 ep->out_overflow = 0; 195 196 /* set speed-dependent max packet; may kick in high bandwidth */ 197 set_max_speed(ep, max); 198 199 /* set type, direction, address; reset fifo counters */ 200 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 201 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); 202 if (tmp == USB_ENDPOINT_XFER_INT) { 203 /* erratum 0105 workaround prevents hs NYET */ 204 if (dev->chiprev == 0100 && 205 dev->gadget.speed == USB_SPEED_HIGH && 206 !(desc->bEndpointAddress & USB_DIR_IN)) 207 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE), 208 &ep->regs->ep_rsp); 209 } else if (tmp == USB_ENDPOINT_XFER_BULK) { 210 /* catch some particularly blatant driver bugs */ 211 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) || 212 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) || 213 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { 214 spin_unlock_irqrestore(&dev->lock, flags); 215 ret = -ERANGE; 216 goto print_err; 217 } 218 } 219 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC); 220 /* Enable this endpoint */ 221 if (dev->quirks & PLX_LEGACY) { 222 tmp <<= ENDPOINT_TYPE; 223 tmp |= desc->bEndpointAddress; 224 /* default full fifo lines */ 225 tmp 
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp <<= IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
			/* Not applicable to Legacy */
			tmp |= BIT(ENDPOINT_DIRECTION);
		} else {
			tmp <<= OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp */
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for 2282: don't use NAK packets on an IN endpoint;
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
	return ret;
}
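
/*
 * Illustrative sketch (not part of this driver): net2280_enable() is
 * reached through usb_ep_enable() from a gadget driver, with ep->desc
 * already matched to the connection speed.  The descriptor values
 * below are hypothetical.
 *
 *	static struct usb_endpoint_descriptor hs_bulk_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= cpu_to_le16(512),
 *	};
 *
 *	ep->desc = &hs_bulk_in_desc;
 *	if (usb_ep_enable(ep) < 0)	// lands in net2280_enable()
 *		goto fail;
 *
 * The checks above reject mismatched bulk maxpacket values (1024 for
 * SuperSpeed, 512 for high speed, at most 64 for full speed).
 */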

static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* "device unplugged" */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

static const struct usb_ep_ops net2280_ep_ops;

static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32	tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
			BIT(SET_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
			BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}

static void ep_reset_338x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
		       /* | BIT(DMA_ABORT), */
		       &ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
				dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
}

static void nuke(struct net2280_ep *);

static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_SUPERSPEED)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	if (!_ep) {
		pr_err("%s: Invalid ep\n", __func__);
		return NULL;
	}
	ep = container_of(_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma	*td;

		td = pci_pool_alloc(ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree(req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}

static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !_req) {
		dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n",
							__func__, _ep, _req);
		return;
	}

	req = container_of(_req, struct net2280_request, req);
	WARN_ON(!list_empty(&req->queue));
	if (req->td)
		pci_pool_free(ep->dev->requests, req->td, req->td_dma);
	kfree(req);
}

/*-------------------------------------------------------------------------*/
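
/*
 * Request lifecycle from the gadget driver's side (a sketch, not part
 * of this file; my_complete() is hypothetical):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	if (usb_ep_queue(ep, req, GFP_ATOMIC) < 0)
 *		goto fail;
 *	...
 *	usb_ep_free_request(ep, req);	// once completed or dequeued
 *
 * On DMA-capable endpoints net2280_alloc_request() above also takes a
 * net2280_dma descriptor ("td") from the PCI pool, so requests must be
 * freed through net2280_free_request() to return it.
 */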

/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf;
	u32			tmp;
	unsigned		count, total;

	/* INVARIANT:  fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch(buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these.  fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned((u32 *)buf);
		cpu_to_le32s(&tmp);
		writel(tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned((u32 *)buf) : count;
		cpu_to_le32s(&tmp);
		set_fifo_bytecount(ep, count & 0x03);
		writel(tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}

/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE:  also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	statp = &ep->regs->ep_stat;

	tmp = readl(statp);
	if (tmp & BIT(NAK_OUT_PACKETS)) {
		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
			ep->ep.name, __func__, tmp);
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	}

	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that statp is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}

/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer.  the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length) &&
			!req->req.zero);
}
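
/*
 * The DMA descriptors filled in below (struct net2280_dma, see
 * net2280.h) have three little-endian fields:
 *
 *	dmacount - transfer byte count plus the VALID_BIT, END_OF_CHAIN,
 *		   DMA_DONE_INTERRUPT_ENABLE and DMA_DIRECTION flags
 *	dmaaddr  - PCI address of the data buffer
 *	dmadesc  - PCI address of the next descriptor in the chain
 *
 * fill_dma_desc() writes dmacount last, behind a wmb(), since a 2280
 * may already be polling VALID_BIT in this descriptor.
 */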

/* fill out dma descriptor to match a given request */
static void fill_dma_desc(struct net2280_ep *ep,
		struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
			!(ep->dev->quirks & PLX_2280))
		dmacount |= BIT(END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32(req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb();
	td->dmacount = cpu_to_le32(dmacount);
}

static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);

static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}

static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma(dma);
}

static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_SUPERSPEED)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking(ep);
}

static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32	tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
				&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
				req->req.zero)) {
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32(ep->td_dma);
	fill_dma_desc(ep, req, 1);

	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}

static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32(ep->td_dma);

	fill_dma_desc(ep, req, valid);
}

static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/
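
/*
 * Note for completion callbacks: done() above drops dev->lock around
 * usb_gadget_giveback_request(), so a gadget driver's ->complete()
 * handler may legally queue another request on the same endpoint, but
 * local interrupts stay disabled across the callback, so it must not
 * sleep.
 */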

static int
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;
	int ret = 0;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	req = container_of(_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf ||
				!list_empty(&req->queue)) {
		ret = -EINVAL;
		goto print_err;
	}
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
		ret = -EDOM;
		goto print_err;
	}
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		ret = -ESHUTDOWN;
		goto print_err;
	}

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0) {
		ret = -EOPNOTSUPP;
		goto print_err;
	}

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			goto print_err;
	}

	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped &&
		!((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
		  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {

		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma(ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status(ep);
				done(ep, req, 0);
				ep_vdbg(dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo(ep, _req);
			else if (list_empty(&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl(&ep->regs->ep_stat);
				if ((s & BIT(FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo(ep, req) &&
							ep->num == 0) {
						done(ep, req, 0);
						allow_status(ep);
						/* don't queue it */
						req = NULL;
					} else if (read_fifo(ep, req) &&
							ep->num != 0) {
						done(ep, req, 0);
						req = NULL;
					} else
						s = readl(&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & BIT(NAK_OUT_PACKETS)))
					writel(BIT(CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely(req->req.zero ||
				(req->req.length % ep->ep.maxpacket));
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma(ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail(&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return ret;

print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
	return ret;
}

static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}
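
/*
 * dmacount counts down: the controller leaves the number of bytes NOT
 * transferred in DMA_BYTE_COUNT_MASK.  Worked example: a 4096 byte OUT
 * request that stops after a 1536 byte short transfer leaves 2560
 * there, and dma_done() reports req.actual = 4096 - 2560 = 1536.
 */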

static void scan_dma_completions(struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty(&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry(ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb();
		tmp = le32_to_cpup(&req->td->dmacount);
		if ((tmp & BIT(VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely(req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl(&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done(ep, req, tmp, 0);
			break;
		} else if (!ep->is_in &&
				(req->req.length % ep->ep.maxpacket) &&
				!(ep->dev->quirks & PLX_SUPERSPEED)) {

			tmp = readl(&ep->regs->ep_stat);
			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoid errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
				ep_warn(ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else {
				tmp = readl(&ep->regs->ep_avail);
				if (tmp) {
					/* fifo gets flushed later */
					ep->out_overflow = 1;
					ep_dbg(ep->dev,
						"%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
					req->req.status = -EOVERFLOW;
				}
			}
		}
		dma_done(ep, req, tmp, 0);
	}
}

static void restart_dma(struct net2280_ep *ep)
{
	struct net2280_request	*req;

	if (ep->stopped)
		return;
	req = list_entry(ep->queue.next, struct net2280_request, queue);

	start_dma(ep, req);
}

static void abort_dma(struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely(!list_empty(&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma(ep->dma);
	} else
		stop_dma(ep->dma);
	scan_dma_completions(ep);
}

/* dequeue ALL requests */
static void nuke(struct net2280_ep *ep)
{
	struct net2280_request	*req;

	/* called with spinlock held */
	ep->stopped = 1;
	if (ep->dma)
		abort_dma(ep);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2280_request,
				queue);
		done(ep, req, -ESHUTDOWN);
	}
}
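
/*
 * Sketch of how a gadget driver cancels a single request (not part of
 * this file): usb_ep_dequeue() ends up in net2280_dequeue() below and
 * completes the victim with -ECONNRESET.
 *
 *	if (usb_ep_dequeue(ep, req) != 0)
 *		...;	// already completed, or never queued
 */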

/* dequeue JUST ONE request */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
		pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
						__func__, _ep, _req);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl(&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma(ep->dma);
		scan_dma_completions(ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
								__func__);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma(ep);
			if (likely(ep->queue.next == &req->queue)) {
				/* NOTE: misreports single-transfer mode */
				req->td->dmacount = 0;	/* invalidate */
				dma_done(ep, req,
					readl(&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
			done(ep, req, -ECONNRESET);
		}
		req = NULL;
	}

	if (req)
		done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty(&ep->queue))
			stop_dma(ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel(dmactl, &ep->dma->dmactl);
			else
				start_dma(ep, list_entry(ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int net2280_fifo_status(struct usb_ep *_ep);
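
/*
 * Halt vs. wedge, from the gadget driver's point of view (a sketch,
 * not part of this file): usb_ep_set_halt(ep) stalls the endpoint
 * until the host clears the stall; usb_ep_set_wedge(ep) also stalls,
 * but stays stalled across CLEAR_FEATURE(ENDPOINT_HALT) until the
 * driver itself calls usb_ep_clear_halt(ep).  Both paths end up in
 * net2280_set_halt_and_wedge() below.
 */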
"wedge" : "halt"); 1291 /* set/clear, then synch memory views with the device */ 1292 if (value) { 1293 if (ep->num == 0) 1294 ep->dev->protocol_stall = 1; 1295 else 1296 set_halt(ep); 1297 if (wedged) 1298 ep->wedged = 1; 1299 } else { 1300 clear_halt(ep); 1301 if (ep->dev->quirks & PLX_SUPERSPEED && 1302 !list_empty(&ep->queue) && ep->td_dma) 1303 restart_dma(ep); 1304 ep->wedged = 0; 1305 } 1306 (void) readl(&ep->regs->ep_rsp); 1307 } 1308 spin_unlock_irqrestore(&ep->dev->lock, flags); 1309 1310 return retval; 1311 1312 print_unlock: 1313 spin_unlock_irqrestore(&ep->dev->lock, flags); 1314 print_err: 1315 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval); 1316 return retval; 1317 } 1318 1319 static int net2280_set_halt(struct usb_ep *_ep, int value) 1320 { 1321 return net2280_set_halt_and_wedge(_ep, value, 0); 1322 } 1323 1324 static int net2280_set_wedge(struct usb_ep *_ep) 1325 { 1326 if (!_ep || _ep->name == ep0name) { 1327 pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep); 1328 return -EINVAL; 1329 } 1330 return net2280_set_halt_and_wedge(_ep, 1, 1); 1331 } 1332 1333 static int net2280_fifo_status(struct usb_ep *_ep) 1334 { 1335 struct net2280_ep *ep; 1336 u32 avail; 1337 1338 ep = container_of(_ep, struct net2280_ep, ep); 1339 if (!_ep || (!ep->desc && ep->num != 0)) { 1340 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 1341 return -ENODEV; 1342 } 1343 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { 1344 dev_err(&ep->dev->pdev->dev, 1345 "%s: Invalid driver=%p or speed=%d\n", 1346 __func__, ep->dev->driver, ep->dev->gadget.speed); 1347 return -ESHUTDOWN; 1348 } 1349 1350 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1); 1351 if (avail > ep->fifo_size) { 1352 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__); 1353 return -EOVERFLOW; 1354 } 1355 if (ep->is_in) 1356 avail = ep->fifo_size - avail; 1357 return avail; 1358 } 1359 1360 static void net2280_fifo_flush(struct usb_ep *_ep) 1361 { 1362 struct net2280_ep *ep; 1363 1364 ep = container_of(_ep, struct net2280_ep, ep); 1365 if (!_ep || (!ep->desc && ep->num != 0)) { 1366 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 1367 return; 1368 } 1369 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { 1370 dev_err(&ep->dev->pdev->dev, 1371 "%s: Invalid driver=%p or speed=%d\n", 1372 __func__, ep->dev->driver, ep->dev->gadget.speed); 1373 return; 1374 } 1375 1376 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 1377 (void) readl(&ep->regs->ep_rsp); 1378 } 1379 1380 static const struct usb_ep_ops net2280_ep_ops = { 1381 .enable = net2280_enable, 1382 .disable = net2280_disable, 1383 1384 .alloc_request = net2280_alloc_request, 1385 .free_request = net2280_free_request, 1386 1387 .queue = net2280_queue, 1388 .dequeue = net2280_dequeue, 1389 1390 .set_halt = net2280_set_halt, 1391 .set_wedge = net2280_set_wedge, 1392 .fifo_status = net2280_fifo_status, 1393 .fifo_flush = net2280_fifo_flush, 1394 }; 1395 1396 /*-------------------------------------------------------------------------*/ 1397 1398 static int net2280_get_frame(struct usb_gadget *_gadget) 1399 { 1400 struct net2280 *dev; 1401 unsigned long flags; 1402 u16 retval; 1403 1404 if (!_gadget) 1405 return -ENODEV; 1406 dev = container_of(_gadget, struct net2280, gadget); 1407 spin_lock_irqsave(&dev->lock, flags); 1408 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff; 1409 spin_unlock_irqrestore(&dev->lock, flags); 1410 return retval; 1411 } 1412 1413 static int net2280_wakeup(struct usb_gadget 

static int net2280_wakeup(struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (value) {
		tmp |= BIT(SELF_POWERED_STATUS);
		_gadget->is_selfpowered = 1;
	} else {
		tmp &= ~BIT(SELF_POWERED_STATUS);
		_gadget->is_selfpowered = 0;
	}
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280	*dev;
	u32		tmp;
	unsigned long	flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= BIT(USB_DETECT_ENABLE);
	else
		tmp &= ~BIT(USB_DETECT_ENABLE);
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);

static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};

/*-------------------------------------------------------------------------*/

#ifdef	CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
			     char *buf)
{
	struct net2280	*dev = dev_get_drvdata(_dev);

	if (!dev->driver || !dev->driver->function ||
			strlen(dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);

static ssize_t registers_show(struct device *_dev,
			      struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size, t;
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version " DRIVER_VERSION
			", chiprev %04x\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			readl(&dev->regs->devinit),
			readl(&dev->regs->fifoctl),
			s,
			readl(&dev->regs->pciirqenb0),
			readl(&dev->regs->pciirqenb1),
			readl(&dev->regs->irqstat0),
			readl(&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl(&dev->usb->usbctl);
	t2 = readl(&dev->usb->usbstat);
	if (t1 & BIT(VBUS_PIN)) {
		if (t2 & BIT(HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf(next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
			"addr 0x%02x (%s)\n",
			readl(&dev->usb->stdrsp), t1, t2,
			readl(&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep[i];
		if (i && !ep->desc)
			continue;

		t1 = readl(&ep->cfg->ep_cfg);
		t2 = readl(&ep->regs->ep_rsp) & 0xff;
		t = scnprintf(next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
				"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & BIT(CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & BIT(CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl(&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf(next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl(&ep->regs->ep_stat),
				readl(&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING(t1),
				type_string(t1 >> 8),
				ep->stopped ? "*" : "");
"*" : ""); 1621 size -= t; 1622 next += t; 1623 1624 if (!ep->dma) 1625 continue; 1626 1627 t = scnprintf(next, size, 1628 " dma\tctl %08x stat %08x count %08x\n" 1629 "\taddr %08x desc %08x\n", 1630 readl(&ep->dma->dmactl), 1631 readl(&ep->dma->dmastat), 1632 readl(&ep->dma->dmacount), 1633 readl(&ep->dma->dmaaddr), 1634 readl(&ep->dma->dmadesc)); 1635 size -= t; 1636 next += t; 1637 1638 } 1639 1640 /* Indexed Registers (none yet) */ 1641 1642 /* Statistics */ 1643 t = scnprintf(next, size, "\nirqs: "); 1644 size -= t; 1645 next += t; 1646 for (i = 0; i < dev->n_ep; i++) { 1647 struct net2280_ep *ep; 1648 1649 ep = &dev->ep[i]; 1650 if (i && !ep->irqs) 1651 continue; 1652 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs); 1653 size -= t; 1654 next += t; 1655 1656 } 1657 t = scnprintf(next, size, "\n"); 1658 size -= t; 1659 next += t; 1660 1661 spin_unlock_irqrestore(&dev->lock, flags); 1662 1663 return PAGE_SIZE - size; 1664 } 1665 static DEVICE_ATTR_RO(registers); 1666 1667 static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, 1668 char *buf) 1669 { 1670 struct net2280 *dev; 1671 char *next; 1672 unsigned size; 1673 unsigned long flags; 1674 int i; 1675 1676 dev = dev_get_drvdata(_dev); 1677 next = buf; 1678 size = PAGE_SIZE; 1679 spin_lock_irqsave(&dev->lock, flags); 1680 1681 for (i = 0; i < dev->n_ep; i++) { 1682 struct net2280_ep *ep = &dev->ep[i]; 1683 struct net2280_request *req; 1684 int t; 1685 1686 if (i != 0) { 1687 const struct usb_endpoint_descriptor *d; 1688 1689 d = ep->desc; 1690 if (!d) 1691 continue; 1692 t = d->bEndpointAddress; 1693 t = scnprintf(next, size, 1694 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n", 1695 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, 1696 (t & USB_DIR_IN) ? "in" : "out", 1697 type_string(d->bmAttributes), 1698 usb_endpoint_maxp(d) & 0x1fff, 1699 ep->dma ? "dma" : "pio", ep->fifo_size 1700 ); 1701 } else /* ep0 should only have one transfer queued */ 1702 t = scnprintf(next, size, "ep0 max 64 pio %s\n", 1703 ep->is_in ? 
"in" : "out"); 1704 if (t <= 0 || t > size) 1705 goto done; 1706 size -= t; 1707 next += t; 1708 1709 if (list_empty(&ep->queue)) { 1710 t = scnprintf(next, size, "\t(nothing queued)\n"); 1711 if (t <= 0 || t > size) 1712 goto done; 1713 size -= t; 1714 next += t; 1715 continue; 1716 } 1717 list_for_each_entry(req, &ep->queue, queue) { 1718 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) 1719 t = scnprintf(next, size, 1720 "\treq %p len %d/%d " 1721 "buf %p (dmacount %08x)\n", 1722 &req->req, req->req.actual, 1723 req->req.length, req->req.buf, 1724 readl(&ep->dma->dmacount)); 1725 else 1726 t = scnprintf(next, size, 1727 "\treq %p len %d/%d buf %p\n", 1728 &req->req, req->req.actual, 1729 req->req.length, req->req.buf); 1730 if (t <= 0 || t > size) 1731 goto done; 1732 size -= t; 1733 next += t; 1734 1735 if (ep->dma) { 1736 struct net2280_dma *td; 1737 1738 td = req->td; 1739 t = scnprintf(next, size, "\t td %08x " 1740 " count %08x buf %08x desc %08x\n", 1741 (u32) req->td_dma, 1742 le32_to_cpu(td->dmacount), 1743 le32_to_cpu(td->dmaaddr), 1744 le32_to_cpu(td->dmadesc)); 1745 if (t <= 0 || t > size) 1746 goto done; 1747 size -= t; 1748 next += t; 1749 } 1750 } 1751 } 1752 1753 done: 1754 spin_unlock_irqrestore(&dev->lock, flags); 1755 return PAGE_SIZE - size; 1756 } 1757 static DEVICE_ATTR_RO(queues); 1758 1759 1760 #else 1761 1762 #define device_create_file(a, b) (0) 1763 #define device_remove_file(a, b) do { } while (0) 1764 1765 #endif 1766 1767 /*-------------------------------------------------------------------------*/ 1768 1769 /* another driver-specific mode might be a request type doing dma 1770 * to/from another device fifo instead of to/from memory. 1771 */ 1772 1773 static void set_fifo_mode(struct net2280 *dev, int mode) 1774 { 1775 /* keeping high bits preserves BAR2 */ 1776 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl); 1777 1778 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */ 1779 INIT_LIST_HEAD(&dev->gadget.ep_list); 1780 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1781 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1782 switch (mode) { 1783 case 0: 1784 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1785 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list); 1786 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1787 break; 1788 case 1: 1789 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048; 1790 break; 1791 case 2: 1792 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1793 dev->ep[1].fifo_size = 2048; 1794 dev->ep[2].fifo_size = 1024; 1795 break; 1796 } 1797 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */ 1798 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list); 1799 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list); 1800 } 1801 1802 static void defect7374_disable_data_eps(struct net2280 *dev) 1803 { 1804 /* 1805 * For Defect 7374, disable data EPs (and more): 1806 * - This phase undoes the earlier phase of the Defect 7374 workaround, 1807 * returing ep regs back to normal. 

static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 *  - This phase undoes the earlier phase of the Defect 7374 workaround,
	 *    returning ep regs back to normal.
	 */
	struct net2280_ep	*ep;
	int			i;
	unsigned char		ep_sel;
	u32			tmp_reg;

	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(0, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
					ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}

static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);

	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
		== DEFECT7374_FSM_SS_CONTROL_READ);

	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
	ep_warn(dev, "It will operate on cold-reboot and SS connect");

	/* GPEPs: */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
			((dev->enhanced_mode) ?
			BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
			BIT(IN_ENDPOINT_ENABLE));

	for (i = 1; i < 5; i++)
		writel(tmp, &dev->ep[i].cfg->ep_cfg);

	/* CSRIN, PCIIN, STATIN, RCIN */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
	writel(tmp, &dev->dep[1].dep_cfg);
	writel(tmp, &dev->dep[3].dep_cfg);
	writel(tmp, &dev->dep[4].dep_cfg);
	writel(tmp, &dev->dep[5].dep_cfg);

	/* Implemented for development and debug.
	 * Can be refined/tuned later.
	 */
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel),
				&dev->plregs->pl_ep_ctrl);

		if (ep_sel == 1) {
			tmp =
				(readl(&dev->plregs->pl_ep_ctrl) |
				BIT(CLEAR_ACK_ERROR_CODE) | 0);
			writel(tmp, &dev->plregs->pl_ep_ctrl);
			continue;
		}

		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18 || ep_sel == 20)
			continue;

		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
		writel(tmp, &dev->plregs->pl_ep_cfg_4);

		tmp = readl(&dev->plregs->pl_ep_ctrl) &
			~BIT(EP_INITIALIZED);
		writel(tmp, &dev->plregs->pl_ep_ctrl);

	}

	/* Set FSM to focus on the first Control Read:
	 *  - Tip: Connection speed is known upon the first
	 *    setup request.
	 */
	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
	set_idx_reg(dev->regs, SCRATCH, scratch);

}

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 *
 * most of the work to support multiple net2280 controllers would
 * be to associate this gadget driver (yes?) with all of them, or
 * perhaps to bind specific drivers to specific devices.
 */

static void usb_reset_228x(struct net2280 *dev)
{
	u32	tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void) readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	/* disable automatic responses, and irqs */
	writel(0, &dev->usb->stdrsp);
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep	*ep = &dev->ep[tmp + 1];
		if (ep->dma)
			abort_dma(ep);
	}

	writel(~0, &dev->regs->irqstat0);
	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);

	/* reset, and enable pci */
	tmp = readl(&dev->regs->devinit) |
		BIT(PCI_ENABLE) |
		BIT(FIFO_SOFT_RESET) |
		BIT(USB_SOFT_RESET) |
		BIT(M8051_RESET);
	writel(tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
}

static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	if (dev->bug7734_patched) {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];

		if (ep->dma)
			abort_dma(ep);
	}

	writel(~0, &dev->regs->irqstat0);
	writel(~0, &dev->regs->irqstat1);

	if (dev->bug7734_patched) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
maybe not ep-3 or ep-4 */ 1997 INIT_LIST_HEAD(&dev->gadget.ep_list); 1998 1999 for (tmp = 1; tmp < dev->n_ep; tmp++) 2000 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); 2001 2002 } 2003 2004 static void usb_reset(struct net2280 *dev) 2005 { 2006 if (dev->quirks & PLX_LEGACY) 2007 return usb_reset_228x(dev); 2008 return usb_reset_338x(dev); 2009 } 2010 2011 static void usb_reinit_228x(struct net2280 *dev) 2012 { 2013 u32 tmp; 2014 2015 /* basic endpoint init */ 2016 for (tmp = 0; tmp < 7; tmp++) { 2017 struct net2280_ep *ep = &dev->ep[tmp]; 2018 2019 ep->ep.name = ep_name[tmp]; 2020 ep->dev = dev; 2021 ep->num = tmp; 2022 2023 if (tmp > 0 && tmp <= 4) { 2024 ep->fifo_size = 1024; 2025 ep->dma = &dev->dma[tmp - 1]; 2026 } else 2027 ep->fifo_size = 64; 2028 ep->regs = &dev->epregs[tmp]; 2029 ep->cfg = &dev->epregs[tmp]; 2030 ep_reset_228x(dev->regs, ep); 2031 } 2032 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 2033 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64); 2034 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64); 2035 2036 dev->gadget.ep0 = &dev->ep[0].ep; 2037 dev->ep[0].stopped = 0; 2038 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2039 2040 /* we want to prevent lowlevel/insecure access from the USB host, 2041 * but erratum 0119 means this enable bit is ignored 2042 */ 2043 for (tmp = 0; tmp < 5; tmp++) 2044 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg); 2045 } 2046 2047 static void usb_reinit_338x(struct net2280 *dev) 2048 { 2049 int i; 2050 u32 tmp, val; 2051 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 }; 2052 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00, 2053 0x00, 0xC0, 0x00, 0xC0 }; 2054 2055 /* basic endpoint init */ 2056 for (i = 0; i < dev->n_ep; i++) { 2057 struct net2280_ep *ep = &dev->ep[i]; 2058 2059 ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i]; 2060 ep->dev = dev; 2061 ep->num = i; 2062 2063 if (i > 0 && i <= 4) 2064 ep->dma = &dev->dma[i - 1]; 2065 2066 if (dev->enhanced_mode) { 2067 ep->cfg = &dev->epregs[ne[i]]; 2068 ep->regs = (struct net2280_ep_regs __iomem *) 2069 (((void __iomem *)&dev->epregs[ne[i]]) + 2070 ep_reg_addr[i]); 2071 } else { 2072 ep->cfg = &dev->epregs[i]; 2073 ep->regs = &dev->epregs[i]; 2074 } 2075 2076 ep->fifo_size = (i != 0) ? 2048 : 512; 2077 2078 ep_reset_338x(dev->regs, ep); 2079 } 2080 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); 2081 2082 dev->gadget.ep0 = &dev->ep[0].ep; 2083 dev->ep[0].stopped = 0; 2084 2085 /* Link layer set up */ 2086 if (dev->bug7734_patched) { 2087 tmp = readl(&dev->usb_ext->usbctl2) & 2088 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE)); 2089 writel(tmp, &dev->usb_ext->usbctl2); 2090 } 2091 2092 /* Hardware Defect and Workaround */ 2093 val = readl(&dev->ll_lfps_regs->ll_lfps_5); 2094 val &= ~(0xf << TIMER_LFPS_6US); 2095 val |= 0x5 << TIMER_LFPS_6US; 2096 writel(val, &dev->ll_lfps_regs->ll_lfps_5); 2097 2098 val = readl(&dev->ll_lfps_regs->ll_lfps_6); 2099 val &= ~(0xffff << TIMER_LFPS_80US); 2100 val |= 0x0100 << TIMER_LFPS_80US; 2101 writel(val, &dev->ll_lfps_regs->ll_lfps_6); 2102 2103 /* 2104 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB 2105 * Hot Reset Exit Handshake may Fail in Specific Case using 2106 * Default Register Settings. Workaround for Enumeration test. 
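 * The read-modify-writes just below retune the TS2 training sequence
 * counters (HOT_TX_NORESET_TS2, HOT_RX_RESET_TS2) so the handshake
 * can complete.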
2107 */ 2108 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); 2109 val &= ~(0x1f << HOT_TX_NORESET_TS2); 2110 val |= 0x10 << HOT_TX_NORESET_TS2; 2111 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); 2112 2113 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); 2114 val &= ~(0x1f << HOT_RX_RESET_TS2); 2115 val |= 0x3 << HOT_RX_RESET_TS2; 2116 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); 2117 2118 /* 2119 * Set Recovery Idle to Recover bit: 2120 * - On SS connections, setting Recovery Idle to Recover Fmw improves 2121 * link robustness with various hosts and hubs. 2122 * - It is safe to set for all connection speeds; all chip revisions. 2123 * - R-M-W to leave other bits undisturbed. 2124 * - Reference PLX TT-7372 2125 */ 2126 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); 2127 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW); 2128 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); 2129 2130 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2131 2132 /* disable dedicated endpoints */ 2133 writel(0x0D, &dev->dep[0].dep_cfg); 2134 writel(0x0D, &dev->dep[1].dep_cfg); 2135 writel(0x0E, &dev->dep[2].dep_cfg); 2136 writel(0x0E, &dev->dep[3].dep_cfg); 2137 writel(0x0F, &dev->dep[4].dep_cfg); 2138 writel(0x0C, &dev->dep[5].dep_cfg); 2139 } 2140 2141 static void usb_reinit(struct net2280 *dev) 2142 { 2143 if (dev->quirks & PLX_LEGACY) 2144 return usb_reinit_228x(dev); 2145 return usb_reinit_338x(dev); 2146 } 2147 2148 static void ep0_start_228x(struct net2280 *dev) 2149 { 2150 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 2151 BIT(CLEAR_NAK_OUT_PACKETS) | 2152 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), 2153 &dev->epregs[0].ep_rsp); 2154 2155 /* 2156 * hardware optionally handles a bunch of standard requests 2157 * that the API hides from drivers anyway. have it do so. 2158 * endpoint status/features are handled in software, to 2159 * help pass tests for some dubious behavior. 2160 */ 2161 writel(BIT(SET_TEST_MODE) | 2162 BIT(SET_ADDRESS) | 2163 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) | 2164 BIT(GET_DEVICE_STATUS) | 2165 BIT(GET_INTERFACE_STATUS), 2166 &dev->usb->stdrsp); 2167 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2168 BIT(SELF_POWERED_USB_DEVICE) | 2169 BIT(REMOTE_WAKEUP_SUPPORT) | 2170 (dev->softconnect << USB_DETECT_ENABLE) | 2171 BIT(SELF_POWERED_STATUS), 2172 &dev->usb->usbctl); 2173 2174 /* enable irqs so we can see ep0 and general operation */ 2175 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2176 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2177 &dev->regs->pciirqenb0); 2178 writel(BIT(PCI_INTERRUPT_ENABLE) | 2179 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2180 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2181 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) | 2182 BIT(VBUS_INTERRUPT_ENABLE) | 2183 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2184 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE), 2185 &dev->regs->pciirqenb1); 2186 2187 /* don't leave any writes posted */ 2188 (void) readl(&dev->usb->usbctl); 2189 } 2190 2191 static void ep0_start_338x(struct net2280 *dev) 2192 { 2193 2194 if (dev->bug7734_patched) 2195 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) | 2196 BIT(SET_EP_HIDE_STATUS_PHASE), 2197 &dev->epregs[0].ep_rsp); 2198 2199 /* 2200 * hardware optionally handles a bunch of standard requests 2201 * that the API hides from drivers anyway. have it do so. 2202 * endpoint status/features are handled in software, to 2203 * help pass tests for some dubious behavior. 
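 * (on the 338x this also covers SET_SEL and SET_ISOCHRONOUS_DELAY)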
2204 */ 2205 writel(BIT(SET_ISOCHRONOUS_DELAY) | 2206 BIT(SET_SEL) | 2207 BIT(SET_TEST_MODE) | 2208 BIT(SET_ADDRESS) | 2209 BIT(GET_INTERFACE_STATUS) | 2210 BIT(GET_DEVICE_STATUS), 2211 &dev->usb->stdrsp); 2212 dev->wakeup_enable = 1; 2213 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2214 (dev->softconnect << USB_DETECT_ENABLE) | 2215 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2216 &dev->usb->usbctl); 2217 2218 /* enable irqs so we can see ep0 and general operation */ 2219 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2220 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2221 &dev->regs->pciirqenb0); 2222 writel(BIT(PCI_INTERRUPT_ENABLE) | 2223 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2224 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) | 2225 BIT(VBUS_INTERRUPT_ENABLE), 2226 &dev->regs->pciirqenb1); 2227 2228 /* don't leave any writes posted */ 2229 (void)readl(&dev->usb->usbctl); 2230 } 2231 2232 static void ep0_start(struct net2280 *dev) 2233 { 2234 if (dev->quirks & PLX_LEGACY) 2235 return ep0_start_228x(dev); 2236 return ep0_start_338x(dev); 2237 } 2238 2239 /* when a driver is successfully registered, it will receive 2240 * control requests including set_configuration(), which enables 2241 * non-control requests. then usb traffic follows until a 2242 * disconnect is reported. then a host may connect again, or 2243 * the driver might get unbound. 2244 */ 2245 static int net2280_start(struct usb_gadget *_gadget, 2246 struct usb_gadget_driver *driver) 2247 { 2248 struct net2280 *dev; 2249 int retval; 2250 unsigned i; 2251 2252 /* insist on high speed support from the driver, since 2253 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) 2254 * "must not be used in normal operation" 2255 */ 2256 if (!driver || driver->max_speed < USB_SPEED_HIGH || 2257 !driver->setup) 2258 return -EINVAL; 2259 2260 dev = container_of(_gadget, struct net2280, gadget); 2261 2262 for (i = 0; i < dev->n_ep; i++) 2263 dev->ep[i].irqs = 0; 2264 2265 /* hook up the driver ... */ 2266 driver->driver.bus = NULL; 2267 dev->driver = driver; 2268 2269 retval = device_create_file(&dev->pdev->dev, &dev_attr_function); 2270 if (retval) 2271 goto err_unbind; 2272 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues); 2273 if (retval) 2274 goto err_func; 2275 2276 /* enable host detection and ep0; and we're ready 2277 * for set_configuration as well as eventual disconnect. 2278 */ 2279 net2280_led_active(dev, 1); 2280 2281 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched) 2282 defect7374_enable_data_eps_zero(dev); 2283 2284 ep0_start(dev); 2285 2286 /* pci writes may still be posted */ 2287 return 0; 2288 2289 err_func: 2290 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2291 err_unbind: 2292 dev->driver = NULL; 2293 return retval; 2294 } 2295 2296 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) 2297 { 2298 int i; 2299 2300 /* don't disconnect if it's not connected */ 2301 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 2302 driver = NULL; 2303 2304 /* stop hardware; prevent new request submissions; 2305 * and kill any outstanding requests. 
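 * caller holds dev->lock; it is dropped only around the gadget
 * driver's disconnect() callback.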
 */
	usb_reset(dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit(dev);
}

static int net2280_stop(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;

	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	net2280_led_active(dev, 0);

	device_remove_file(&dev->pdev->dev, &dev_attr_function);
	device_remove_file(&dev->pdev->dev, &dev_attr_queues);

	dev->driver = NULL;

	return 0;
}

/*-------------------------------------------------------------------------*/

/* handle ep0, ep-e, ep-f with 64 byte packets: one packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 */
static void handle_ep_small(struct net2280_ep *ep)
{
	struct net2280_request *req;
	u32 t;
	/* 0 error, 1 mid-data, 2 done */
	int mode = 1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl(&ep->regs->ep_stat);
	ep->irqs++;

	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : NULL);

	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel(t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, so handling stalls here should
	 * never cause trouble on the host side.
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
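 * the ep->num == 0 block below does that token bookkeeping.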
2383 */ 2384 if (unlikely(ep->num == 0)) { 2385 if (ep->is_in) { 2386 /* status; stop NAKing */ 2387 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) { 2388 if (ep->dev->protocol_stall) { 2389 ep->stopped = 1; 2390 set_halt(ep); 2391 } 2392 if (!req) 2393 allow_status(ep); 2394 mode = 2; 2395 /* reply to extra IN data tokens with a zlp */ 2396 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2397 if (ep->dev->protocol_stall) { 2398 ep->stopped = 1; 2399 set_halt(ep); 2400 mode = 2; 2401 } else if (ep->responded && 2402 !req && !ep->stopped) 2403 write_fifo(ep, NULL); 2404 } 2405 } else { 2406 /* status; stop NAKing */ 2407 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2408 if (ep->dev->protocol_stall) { 2409 ep->stopped = 1; 2410 set_halt(ep); 2411 } 2412 mode = 2; 2413 /* an extra OUT token is an error */ 2414 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) && 2415 req && 2416 req->req.actual == req->req.length) || 2417 (ep->responded && !req)) { 2418 ep->dev->protocol_stall = 1; 2419 set_halt(ep); 2420 ep->stopped = 1; 2421 if (req) 2422 done(ep, req, -EOVERFLOW); 2423 req = NULL; 2424 } 2425 } 2426 } 2427 2428 if (unlikely(!req)) 2429 return; 2430 2431 /* manual DMA queue advance after short OUT */ 2432 if (likely(ep->dma)) { 2433 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 2434 u32 count; 2435 int stopped = ep->stopped; 2436 2437 /* TRANSFERRED works around OUT_DONE erratum 0112. 2438 * we expect (N <= maxpacket) bytes; host wrote M. 2439 * iff (M < N) we won't ever see a DMA interrupt. 2440 */ 2441 ep->stopped = 1; 2442 for (count = 0; ; t = readl(&ep->regs->ep_stat)) { 2443 2444 /* any preceding dma transfers must finish. 2445 * dma handles (M >= N), may empty the queue 2446 */ 2447 scan_dma_completions(ep); 2448 if (unlikely(list_empty(&ep->queue) || 2449 ep->out_overflow)) { 2450 req = NULL; 2451 break; 2452 } 2453 req = list_entry(ep->queue.next, 2454 struct net2280_request, queue); 2455 2456 /* here either (M < N), a "real" short rx; 2457 * or (M == N) and the queue didn't empty 2458 */ 2459 if (likely(t & BIT(FIFO_EMPTY))) { 2460 count = readl(&ep->dma->dmacount); 2461 count &= DMA_BYTE_COUNT_MASK; 2462 if (readl(&ep->dma->dmadesc) 2463 != req->td_dma) 2464 req = NULL; 2465 break; 2466 } 2467 udelay(1); 2468 } 2469 2470 /* stop DMA, leave ep NAKing */ 2471 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 2472 spin_stop_dma(ep->dma); 2473 2474 if (likely(req)) { 2475 req->td->dmacount = 0; 2476 t = readl(&ep->regs->ep_avail); 2477 dma_done(ep, req, count, 2478 (ep->out_overflow || t) 2479 ? 
-EOVERFLOW : 0); 2480 } 2481 2482 /* also flush to prevent erratum 0106 trouble */ 2483 if (unlikely(ep->out_overflow || 2484 (ep->dev->chiprev == 0x0100 && 2485 ep->dev->gadget.speed 2486 == USB_SPEED_FULL))) { 2487 out_flush(ep); 2488 ep->out_overflow = 0; 2489 } 2490 2491 /* (re)start dma if needed, stop NAKing */ 2492 ep->stopped = stopped; 2493 if (!list_empty(&ep->queue)) 2494 restart_dma(ep); 2495 } else 2496 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n", 2497 ep->ep.name, t); 2498 return; 2499 2500 /* data packet(s) received (in the fifo, OUT) */ 2501 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) { 2502 if (read_fifo(ep, req) && ep->num != 0) 2503 mode = 2; 2504 2505 /* data packet(s) transmitted (IN) */ 2506 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) { 2507 unsigned len; 2508 2509 len = req->req.length - req->req.actual; 2510 if (len > ep->ep.maxpacket) 2511 len = ep->ep.maxpacket; 2512 req->req.actual += len; 2513 2514 /* if we wrote it all, we're usually done */ 2515 /* send zlps until the status stage */ 2516 if ((req->req.actual == req->req.length) && 2517 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) 2518 mode = 2; 2519 2520 /* there was nothing to do ... */ 2521 } else if (mode == 1) 2522 return; 2523 2524 /* done */ 2525 if (mode == 2) { 2526 /* stream endpoints often resubmit/unlink in completion */ 2527 done(ep, req, 0); 2528 2529 /* maybe advance queue to next request */ 2530 if (ep->num == 0) { 2531 /* NOTE: net2280 could let gadget driver start the 2532 * status stage later. since not all controllers let 2533 * them control that, the api doesn't (yet) allow it. 2534 */ 2535 if (!ep->stopped) 2536 allow_status(ep); 2537 req = NULL; 2538 } else { 2539 if (!list_empty(&ep->queue) && !ep->stopped) 2540 req = list_entry(ep->queue.next, 2541 struct net2280_request, queue); 2542 else 2543 req = NULL; 2544 if (req && !ep->is_in) 2545 stop_out_naking(ep); 2546 } 2547 } 2548 2549 /* is there a buffer for the next packet? 2550 * for best streaming performance, make sure there is one. 2551 */ 2552 if (req && !ep->stopped) { 2553 2554 /* load IN fifo with next packet (may be zlp) */ 2555 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) 2556 write_fifo(ep, &req->req); 2557 } 2558 } 2559 2560 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex) 2561 { 2562 struct net2280_ep *ep; 2563 2564 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 2565 return &dev->ep[0]; 2566 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { 2567 u8 bEndpointAddress; 2568 2569 if (!ep->desc) 2570 continue; 2571 bEndpointAddress = ep->desc->bEndpointAddress; 2572 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 2573 continue; 2574 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) 2575 return ep; 2576 } 2577 return NULL; 2578 } 2579 2580 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r) 2581 { 2582 u32 scratch, fsmvalue; 2583 u32 ack_wait_timeout, state; 2584 2585 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */ 2586 scratch = get_idx_reg(dev->regs, SCRATCH); 2587 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); 2588 scratch &= ~(0xf << DEFECT7374_FSM_FIELD); 2589 2590 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) && 2591 (r.bRequestType & USB_DIR_IN))) 2592 return; 2593 2594 /* This is the first Control Read for this connection: */ 2595 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) { 2596 /* 2597 * Connection is NOT SS: 2598 * - Connection must be FS or HS. 
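 * - Defect 7374 only affects SuperSpeed (U1/U2), so there is
 *   nothing to patch for this connection.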
 * - This FSM state should allow the workaround software to
 *   run after the next USB connection.
 */
		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
		dev->bug7734_patched = 1;
		goto restore_data_eps;
	}

	/* Connection is SS: */
	for (ack_wait_timeout = 0;
			ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
			ack_wait_timeout++) {

		state = readl(&dev->plregs->pl_ep_status_1)
			& (0xff << STATE);
		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
				(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
			dev->bug7734_patched = 1;
			break;
		}

		/*
		 * We have not yet received the host's Data Phase ACK
		 * - Wait and try again.
		 */
		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
	}

	if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
		ep_err(dev, "FAIL: Defect 7374 workaround waited but failed to detect SS host's data phase ACK\n");
		ep_err(dev, "PL_EP_STATUS_1(23:16): expected 0x11..0x16, got 0x%2.2x\n",
				state >> STATE);
	} else {
		ep_warn(dev, "INFO: Defect 7374 workaround waited about %d usec for Control Read Data Phase ACK\n",
				DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
	}

restore_data_eps:
	/*
	 * Restore data EPs to their pre-workaround settings (disabled,
	 * initialized, and other details).
	 */
	defect7374_disable_data_eps(dev);

	set_idx_reg(dev->regs, SCRATCH, scratch);
}

static void ep_clear_seqnum(struct net2280_ep *ep)
{
	struct net2280 *dev = ep->dev;
	u32 val;
	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };

	val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
	val |= ep_pl[ep->num];
	writel(val, &dev->plregs->pl_ep_ctrl);
	val |= BIT(SEQUENCE_NUMBER_RESET);
	writel(val, &dev->plregs->pl_ep_ctrl);
}

static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
		struct net2280_ep *e;
		u16 status;

	case USB_REQ_SET_CONFIGURATION:
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			status = dev->wakeup_enable ? 
0x02 : 0x00; 2690 if (dev->gadget.is_selfpowered) 2691 status |= BIT(0); 2692 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | 2693 dev->ltm_enable << 4); 2694 writel(0, &dev->epregs[0].ep_irqenb); 2695 set_fifo_bytecount(ep, sizeof(status)); 2696 writel((__force u32) status, &dev->epregs[0].ep_data); 2697 allow_status_338x(ep); 2698 break; 2699 2700 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2701 e = get_ep_by_addr(dev, w_index); 2702 if (!e) 2703 goto do_stall3; 2704 status = readl(&e->regs->ep_rsp) & 2705 BIT(CLEAR_ENDPOINT_HALT); 2706 writel(0, &dev->epregs[0].ep_irqenb); 2707 set_fifo_bytecount(ep, sizeof(status)); 2708 writel((__force u32) status, &dev->epregs[0].ep_data); 2709 allow_status_338x(ep); 2710 break; 2711 2712 default: 2713 goto usb3_delegate; 2714 } 2715 break; 2716 2717 case USB_REQ_CLEAR_FEATURE: 2718 switch (r.bRequestType) { 2719 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2720 if (!dev->addressed_state) { 2721 switch (w_value) { 2722 case USB_DEVICE_U1_ENABLE: 2723 dev->u1_enable = 0; 2724 writel(readl(&dev->usb_ext->usbctl2) & 2725 ~BIT(U1_ENABLE), 2726 &dev->usb_ext->usbctl2); 2727 allow_status_338x(ep); 2728 goto next_endpoints3; 2729 2730 case USB_DEVICE_U2_ENABLE: 2731 dev->u2_enable = 0; 2732 writel(readl(&dev->usb_ext->usbctl2) & 2733 ~BIT(U2_ENABLE), 2734 &dev->usb_ext->usbctl2); 2735 allow_status_338x(ep); 2736 goto next_endpoints3; 2737 2738 case USB_DEVICE_LTM_ENABLE: 2739 dev->ltm_enable = 0; 2740 writel(readl(&dev->usb_ext->usbctl2) & 2741 ~BIT(LTM_ENABLE), 2742 &dev->usb_ext->usbctl2); 2743 allow_status_338x(ep); 2744 goto next_endpoints3; 2745 2746 default: 2747 break; 2748 } 2749 } 2750 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2751 dev->wakeup_enable = 0; 2752 writel(readl(&dev->usb->usbctl) & 2753 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2754 &dev->usb->usbctl); 2755 allow_status_338x(ep); 2756 break; 2757 } 2758 goto usb3_delegate; 2759 2760 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2761 e = get_ep_by_addr(dev, w_index); 2762 if (!e) 2763 goto do_stall3; 2764 if (w_value != USB_ENDPOINT_HALT) 2765 goto do_stall3; 2766 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 2767 /* 2768 * Workaround for SS SeqNum not cleared via 2769 * Endpoint Halt (Clear) bit. 
select the endpoint and
			 * reset its sequence number by hand.
			 */
			ep_clear_seqnum(e);
			clear_halt(e);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
							BIT(U1_ENABLE),
							&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
							BIT(U2_ENABLE),
							&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
							BIT(LTM_ENABLE),
							&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;
				default:
					break;
				}
			}

			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
						BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
						&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			/* halt the addressed endpoint, not ep0 */
			e->stopped = 1;
			if (e->num == 0)
				e->dev->protocol_stall = 1;
			else {
				if (e->dma)
					abort_dma(e);
				set_halt(e);
			}
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}

		break;
	default:

usb3_delegate:
		ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		ep->responded = 0;
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &r);
		spin_lock(&dev->lock);
	}
do_stall3:
	if (tmp < 0) {
		ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		set_halt(ep);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}

static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
{
	struct net2280_ep *ep;
	u32 num, scratch;

	/* most of these don't need individual acks */
	stat &= ~BIT(INTA_ASSERTED);
	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat0 %04x\n", stat); */

	/* starting a control request? 
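 * if so, the eight setup bytes are fetched from setup0123/setup4567
 * below.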
*/ 2889 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) { 2890 union { 2891 u32 raw[2]; 2892 struct usb_ctrlrequest r; 2893 } u; 2894 int tmp; 2895 struct net2280_request *req; 2896 2897 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { 2898 u32 val = readl(&dev->usb->usbstat); 2899 if (val & BIT(SUPER_SPEED)) { 2900 dev->gadget.speed = USB_SPEED_SUPER; 2901 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 2902 EP0_SS_MAX_PACKET_SIZE); 2903 } else if (val & BIT(HIGH_SPEED)) { 2904 dev->gadget.speed = USB_SPEED_HIGH; 2905 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 2906 EP0_HS_MAX_PACKET_SIZE); 2907 } else { 2908 dev->gadget.speed = USB_SPEED_FULL; 2909 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 2910 EP0_HS_MAX_PACKET_SIZE); 2911 } 2912 net2280_led_speed(dev, dev->gadget.speed); 2913 ep_dbg(dev, "%s\n", 2914 usb_speed_string(dev->gadget.speed)); 2915 } 2916 2917 ep = &dev->ep[0]; 2918 ep->irqs++; 2919 2920 /* make sure any leftover request state is cleared */ 2921 stat &= ~BIT(ENDPOINT_0_INTERRUPT); 2922 while (!list_empty(&ep->queue)) { 2923 req = list_entry(ep->queue.next, 2924 struct net2280_request, queue); 2925 done(ep, req, (req->req.actual == req->req.length) 2926 ? 0 : -EPROTO); 2927 } 2928 ep->stopped = 0; 2929 dev->protocol_stall = 0; 2930 if (!(dev->quirks & PLX_SUPERSPEED)) { 2931 if (ep->dev->quirks & PLX_2280) 2932 tmp = BIT(FIFO_OVERFLOW) | 2933 BIT(FIFO_UNDERFLOW); 2934 else 2935 tmp = 0; 2936 2937 writel(tmp | BIT(TIMEOUT) | 2938 BIT(USB_STALL_SENT) | 2939 BIT(USB_IN_NAK_SENT) | 2940 BIT(USB_IN_ACK_RCVD) | 2941 BIT(USB_OUT_PING_NAK_SENT) | 2942 BIT(USB_OUT_ACK_SENT) | 2943 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 2944 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 2945 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 2946 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 2947 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 2948 BIT(DATA_IN_TOKEN_INTERRUPT), 2949 &ep->regs->ep_stat); 2950 } 2951 u.raw[0] = readl(&dev->usb->setup0123); 2952 u.raw[1] = readl(&dev->usb->setup4567); 2953 2954 cpu_to_le32s(&u.raw[0]); 2955 cpu_to_le32s(&u.raw[1]); 2956 2957 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched) 2958 defect7374_workaround(dev, u.r); 2959 2960 tmp = 0; 2961 2962 #define w_value le16_to_cpu(u.r.wValue) 2963 #define w_index le16_to_cpu(u.r.wIndex) 2964 #define w_length le16_to_cpu(u.r.wLength) 2965 2966 /* ack the irq */ 2967 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0); 2968 stat ^= BIT(SETUP_PACKET_INTERRUPT); 2969 2970 /* watch control traffic at the token level, and force 2971 * synchronization before letting the status stage happen. 2972 * FIXME ignore tokens we'll NAK, until driver responds. 2973 * that'll mean a lot less irqs for some drivers. 2974 */ 2975 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; 2976 if (ep->is_in) { 2977 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 2978 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 2979 BIT(DATA_IN_TOKEN_INTERRUPT); 2980 stop_out_naking(ep); 2981 } else 2982 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 2983 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 2984 BIT(DATA_IN_TOKEN_INTERRUPT); 2985 writel(scratch, &dev->epregs[0].ep_irqenb); 2986 2987 /* we made the hardware handle most lowlevel requests; 2988 * everything else goes uplevel to the gadget code. 
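 * dev->lock is dropped around the driver->setup() call.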
2989 */ 2990 ep->responded = 1; 2991 2992 if (dev->gadget.speed == USB_SPEED_SUPER) { 2993 handle_stat0_irqs_superspeed(dev, ep, u.r); 2994 goto next_endpoints; 2995 } 2996 2997 switch (u.r.bRequest) { 2998 case USB_REQ_GET_STATUS: { 2999 struct net2280_ep *e; 3000 __le32 status; 3001 3002 /* hw handles device and interface status */ 3003 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT)) 3004 goto delegate; 3005 e = get_ep_by_addr(dev, w_index); 3006 if (!e || w_length > 2) 3007 goto do_stall; 3008 3009 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT)) 3010 status = cpu_to_le32(1); 3011 else 3012 status = cpu_to_le32(0); 3013 3014 /* don't bother with a request object! */ 3015 writel(0, &dev->epregs[0].ep_irqenb); 3016 set_fifo_bytecount(ep, w_length); 3017 writel((__force u32)status, &dev->epregs[0].ep_data); 3018 allow_status(ep); 3019 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status); 3020 goto next_endpoints; 3021 } 3022 break; 3023 case USB_REQ_CLEAR_FEATURE: { 3024 struct net2280_ep *e; 3025 3026 /* hw handles device features */ 3027 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3028 goto delegate; 3029 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3030 goto do_stall; 3031 e = get_ep_by_addr(dev, w_index); 3032 if (!e) 3033 goto do_stall; 3034 if (e->wedged) { 3035 ep_vdbg(dev, "%s wedged, halt not cleared\n", 3036 ep->ep.name); 3037 } else { 3038 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 3039 clear_halt(e); 3040 if ((ep->dev->quirks & PLX_SUPERSPEED) && 3041 !list_empty(&e->queue) && e->td_dma) 3042 restart_dma(e); 3043 } 3044 allow_status(ep); 3045 goto next_endpoints; 3046 } 3047 break; 3048 case USB_REQ_SET_FEATURE: { 3049 struct net2280_ep *e; 3050 3051 /* hw handles device features */ 3052 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3053 goto delegate; 3054 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3055 goto do_stall; 3056 e = get_ep_by_addr(dev, w_index); 3057 if (!e) 3058 goto do_stall; 3059 if (e->ep.name == ep0name) 3060 goto do_stall; 3061 set_halt(e); 3062 if ((dev->quirks & PLX_SUPERSPEED) && e->dma) 3063 abort_dma(e); 3064 allow_status(ep); 3065 ep_vdbg(dev, "%s set halt\n", ep->ep.name); 3066 goto next_endpoints; 3067 } 3068 break; 3069 default: 3070 delegate: 3071 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x " 3072 "ep_cfg %08x\n", 3073 u.r.bRequestType, u.r.bRequest, 3074 w_value, w_index, w_length, 3075 readl(&ep->cfg->ep_cfg)); 3076 ep->responded = 0; 3077 spin_unlock(&dev->lock); 3078 tmp = dev->driver->setup(&dev->gadget, &u.r); 3079 spin_lock(&dev->lock); 3080 } 3081 3082 /* stall ep0 on error */ 3083 if (tmp < 0) { 3084 do_stall: 3085 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 3086 u.r.bRequestType, u.r.bRequest, tmp); 3087 dev->protocol_stall = 1; 3088 } 3089 3090 /* some in/out token irq should follow; maybe stall then. 3091 * driver must queue a request (even zlp) or halt ep0 3092 * before the host times out. 3093 */ 3094 } 3095 3096 #undef w_value 3097 #undef w_index 3098 #undef w_length 3099 3100 next_endpoints: 3101 /* endpoint data irq ? */ 3102 scratch = stat & 0x7f; 3103 stat &= ~0x7f; 3104 for (num = 0; scratch; num++) { 3105 u32 t; 3106 3107 /* do this endpoint's FIFO and queue need tending? 
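 * (bit N of irqstat0's low byte corresponds to dev->ep[N])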
*/ 3108 t = BIT(num); 3109 if ((scratch & t) == 0) 3110 continue; 3111 scratch ^= t; 3112 3113 ep = &dev->ep[num]; 3114 handle_ep_small(ep); 3115 } 3116 3117 if (stat) 3118 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat); 3119 } 3120 3121 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \ 3122 BIT(DMA_C_INTERRUPT) | \ 3123 BIT(DMA_B_INTERRUPT) | \ 3124 BIT(DMA_A_INTERRUPT)) 3125 #define PCI_ERROR_INTERRUPTS ( \ 3126 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \ 3127 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \ 3128 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3129 3130 static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3131 __releases(dev->lock) 3132 __acquires(dev->lock) 3133 { 3134 struct net2280_ep *ep; 3135 u32 tmp, num, mask, scratch; 3136 3137 /* after disconnect there's nothing else to do! */ 3138 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT); 3139 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED); 3140 3141 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. 3142 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and 3143 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT 3144 * only indicates a change in the reset state). 3145 */ 3146 if (stat & tmp) { 3147 bool reset = false; 3148 bool disconnect = false; 3149 3150 /* 3151 * Ignore disconnects and resets if the speed hasn't been set. 3152 * VBUS can bounce and there's always an initial reset. 3153 */ 3154 writel(tmp, &dev->regs->irqstat1); 3155 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 3156 if ((stat & BIT(VBUS_INTERRUPT)) && 3157 (readl(&dev->usb->usbctl) & 3158 BIT(VBUS_PIN)) == 0) { 3159 disconnect = true; 3160 ep_dbg(dev, "disconnect %s\n", 3161 dev->driver->driver.name); 3162 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && 3163 (readl(&dev->usb->usbstat) & mask) 3164 == 0) { 3165 reset = true; 3166 ep_dbg(dev, "reset %s\n", 3167 dev->driver->driver.name); 3168 } 3169 3170 if (disconnect || reset) { 3171 stop_activity(dev, dev->driver); 3172 ep0_start(dev); 3173 spin_unlock(&dev->lock); 3174 if (reset) 3175 usb_gadget_udc_reset 3176 (&dev->gadget, dev->driver); 3177 else 3178 (dev->driver->disconnect) 3179 (&dev->gadget); 3180 spin_lock(&dev->lock); 3181 return; 3182 } 3183 } 3184 stat &= ~tmp; 3185 3186 /* vBUS can bounce ... one of many reasons to ignore the 3187 * notion of hotplug events on bus connect/disconnect! 
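 * (if nothing else is pending we simply return below)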
3188 */ 3189 if (!stat) 3190 return; 3191 } 3192 3193 /* NOTE: chip stays in PCI D0 state for now, but it could 3194 * enter D1 to save more power 3195 */ 3196 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3197 if (stat & tmp) { 3198 writel(tmp, &dev->regs->irqstat1); 3199 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3200 if (dev->driver->suspend) 3201 dev->driver->suspend(&dev->gadget); 3202 if (!enable_suspend) 3203 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT); 3204 } else { 3205 if (dev->driver->resume) 3206 dev->driver->resume(&dev->gadget); 3207 /* at high speed, note erratum 0133 */ 3208 } 3209 stat &= ~tmp; 3210 } 3211 3212 /* clear any other status/irqs */ 3213 if (stat) 3214 writel(stat, &dev->regs->irqstat1); 3215 3216 /* some status we can just ignore */ 3217 if (dev->quirks & PLX_2280) 3218 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3219 BIT(SUSPEND_REQUEST_INTERRUPT) | 3220 BIT(RESUME_INTERRUPT) | 3221 BIT(SOF_INTERRUPT)); 3222 else 3223 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3224 BIT(RESUME_INTERRUPT) | 3225 BIT(SOF_DOWN_INTERRUPT) | 3226 BIT(SOF_INTERRUPT)); 3227 3228 if (!stat) 3229 return; 3230 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/ 3231 3232 /* DMA status, for ep-{a,b,c,d} */ 3233 scratch = stat & DMA_INTERRUPTS; 3234 stat &= ~DMA_INTERRUPTS; 3235 scratch >>= 9; 3236 for (num = 0; scratch; num++) { 3237 struct net2280_dma_regs __iomem *dma; 3238 3239 tmp = BIT(num); 3240 if ((tmp & scratch) == 0) 3241 continue; 3242 scratch ^= tmp; 3243 3244 ep = &dev->ep[num + 1]; 3245 dma = ep->dma; 3246 3247 if (!dma) 3248 continue; 3249 3250 /* clear ep's dma status */ 3251 tmp = readl(&dma->dmastat); 3252 writel(tmp, &dma->dmastat); 3253 3254 /* dma sync*/ 3255 if (dev->quirks & PLX_SUPERSPEED) { 3256 u32 r_dmacount = readl(&dma->dmacount); 3257 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && 3258 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) 3259 continue; 3260 } 3261 3262 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) { 3263 ep_dbg(ep->dev, "%s no xact done? %08x\n", 3264 ep->ep.name, tmp); 3265 continue; 3266 } 3267 stop_dma(ep->dma); 3268 3269 /* OUT transfers terminate when the data from the 3270 * host is in our memory. Process whatever's done. 3271 * On this path, we know transfer's last packet wasn't 3272 * less than req->length. NAK_OUT_PACKETS may be set, 3273 * or the FIFO may already be holding new packets. 3274 * 3275 * IN transfers can linger in the FIFO for a very 3276 * long time ... we ignore that for now, accounting 3277 * precisely (like PIO does) needs per-packet irqs 3278 */ 3279 scan_dma_completions(ep); 3280 3281 /* disable dma on inactive queues; else maybe restart */ 3282 if (!list_empty(&ep->queue)) { 3283 tmp = readl(&dma->dmactl); 3284 restart_dma(ep); 3285 } 3286 ep->irqs++; 3287 } 3288 3289 /* NOTE: there are other PCI errors we might usefully notice. 3290 * if they appear very often, here's where to try recovering. 3291 */ 3292 if (stat & PCI_ERROR_INTERRUPTS) { 3293 ep_err(dev, "pci dma error; stat %08x\n", stat); 3294 stat &= ~PCI_ERROR_INTERRUPTS; 3295 /* these are fatal errors, but "maybe" they won't 3296 * happen again ... 
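 * so just restart ep0 and keep the device usable.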
3297 */ 3298 stop_activity(dev, dev->driver); 3299 ep0_start(dev); 3300 stat = 0; 3301 } 3302 3303 if (stat) 3304 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat); 3305 } 3306 3307 static irqreturn_t net2280_irq(int irq, void *_dev) 3308 { 3309 struct net2280 *dev = _dev; 3310 3311 /* shared interrupt, not ours */ 3312 if ((dev->quirks & PLX_LEGACY) && 3313 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED)))) 3314 return IRQ_NONE; 3315 3316 spin_lock(&dev->lock); 3317 3318 /* handle disconnect, dma, and more */ 3319 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1)); 3320 3321 /* control requests and PIO */ 3322 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0)); 3323 3324 if (dev->quirks & PLX_SUPERSPEED) { 3325 /* re-enable interrupt to trigger any possible new interrupt */ 3326 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); 3327 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); 3328 writel(pciirqenb1, &dev->regs->pciirqenb1); 3329 } 3330 3331 spin_unlock(&dev->lock); 3332 3333 return IRQ_HANDLED; 3334 } 3335 3336 /*-------------------------------------------------------------------------*/ 3337 3338 static void gadget_release(struct device *_dev) 3339 { 3340 struct net2280 *dev = dev_get_drvdata(_dev); 3341 3342 kfree(dev); 3343 } 3344 3345 /* tear down the binding between this driver and the pci device */ 3346 3347 static void net2280_remove(struct pci_dev *pdev) 3348 { 3349 struct net2280 *dev = pci_get_drvdata(pdev); 3350 3351 usb_del_gadget_udc(&dev->gadget); 3352 3353 BUG_ON(dev->driver); 3354 3355 /* then clean up the resources we allocated during probe() */ 3356 net2280_led_shutdown(dev); 3357 if (dev->requests) { 3358 int i; 3359 for (i = 1; i < 5; i++) { 3360 if (!dev->ep[i].dummy) 3361 continue; 3362 pci_pool_free(dev->requests, dev->ep[i].dummy, 3363 dev->ep[i].td_dma); 3364 } 3365 pci_pool_destroy(dev->requests); 3366 } 3367 if (dev->got_irq) 3368 free_irq(pdev->irq, dev); 3369 if (dev->quirks & PLX_SUPERSPEED) 3370 pci_disable_msi(pdev); 3371 if (dev->regs) 3372 iounmap(dev->regs); 3373 if (dev->region) 3374 release_mem_region(pci_resource_start(pdev, 0), 3375 pci_resource_len(pdev, 0)); 3376 if (dev->enabled) 3377 pci_disable_device(pdev); 3378 device_remove_file(&pdev->dev, &dev_attr_registers); 3379 3380 ep_info(dev, "unbind\n"); 3381 } 3382 3383 /* wrap this driver around the specified device, but 3384 * don't respond over USB until a gadget driver binds to us. 3385 */ 3386 3387 static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3388 { 3389 struct net2280 *dev; 3390 unsigned long resource, len; 3391 void __iomem *base = NULL; 3392 int retval, i; 3393 3394 /* alloc, and start init */ 3395 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3396 if (dev == NULL) { 3397 retval = -ENOMEM; 3398 goto done; 3399 } 3400 3401 pci_set_drvdata(pdev, dev); 3402 spin_lock_init(&dev->lock); 3403 dev->quirks = id->driver_data; 3404 dev->pdev = pdev; 3405 dev->gadget.ops = &net2280_ops; 3406 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ? 3407 USB_SPEED_SUPER : USB_SPEED_HIGH; 3408 3409 /* the "gadget" abstracts/virtualizes the controller */ 3410 dev->gadget.name = driver_name; 3411 3412 /* now all the pci goodies ... 
*/ 3413 if (pci_enable_device(pdev) < 0) { 3414 retval = -ENODEV; 3415 goto done; 3416 } 3417 dev->enabled = 1; 3418 3419 /* BAR 0 holds all the registers 3420 * BAR 1 is 8051 memory; unused here (note erratum 0103) 3421 * BAR 2 is fifo memory; unused here 3422 */ 3423 resource = pci_resource_start(pdev, 0); 3424 len = pci_resource_len(pdev, 0); 3425 if (!request_mem_region(resource, len, driver_name)) { 3426 ep_dbg(dev, "controller already in use\n"); 3427 retval = -EBUSY; 3428 goto done; 3429 } 3430 dev->region = 1; 3431 3432 /* FIXME provide firmware download interface to put 3433 * 8051 code into the chip, e.g. to turn on PCI PM. 3434 */ 3435 3436 base = ioremap_nocache(resource, len); 3437 if (base == NULL) { 3438 ep_dbg(dev, "can't map memory\n"); 3439 retval = -EFAULT; 3440 goto done; 3441 } 3442 dev->regs = (struct net2280_regs __iomem *) base; 3443 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080); 3444 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100); 3445 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180); 3446 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); 3447 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); 3448 3449 if (dev->quirks & PLX_SUPERSPEED) { 3450 u32 fsmvalue; 3451 u32 usbstat; 3452 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *) 3453 (base + 0x00b4); 3454 dev->llregs = (struct usb338x_ll_regs __iomem *) 3455 (base + 0x0700); 3456 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *) 3457 (base + 0x0748); 3458 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *) 3459 (base + 0x077c); 3460 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *) 3461 (base + 0x079c); 3462 dev->plregs = (struct usb338x_pl_regs __iomem *) 3463 (base + 0x0800); 3464 usbstat = readl(&dev->usb->usbstat); 3465 dev->enhanced_mode = !!(usbstat & BIT(11)); 3466 dev->n_ep = (dev->enhanced_mode) ? 9 : 5; 3467 /* put into initial config, link up all endpoints */ 3468 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 3469 (0xf << DEFECT7374_FSM_FIELD); 3470 /* See if firmware needs to set up for workaround: */ 3471 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) { 3472 dev->bug7734_patched = 1; 3473 writel(0, &dev->usb->usbctl); 3474 } else 3475 dev->bug7734_patched = 0; 3476 } else { 3477 dev->enhanced_mode = 0; 3478 dev->n_ep = 7; 3479 /* put into initial config, link up all endpoints */ 3480 writel(0, &dev->usb->usbctl); 3481 } 3482 3483 usb_reset(dev); 3484 usb_reinit(dev); 3485 3486 /* irq setup after old hardware is cleaned up */ 3487 if (!pdev->irq) { 3488 ep_err(dev, "No IRQ. 
Check PCI setup!\n"); 3489 retval = -ENODEV; 3490 goto done; 3491 } 3492 3493 if (dev->quirks & PLX_SUPERSPEED) 3494 if (pci_enable_msi(pdev)) 3495 ep_err(dev, "Failed to enable MSI mode\n"); 3496 3497 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED, 3498 driver_name, dev)) { 3499 ep_err(dev, "request interrupt %d failed\n", pdev->irq); 3500 retval = -EBUSY; 3501 goto done; 3502 } 3503 dev->got_irq = 1; 3504 3505 /* DMA setup */ 3506 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */ 3507 dev->requests = pci_pool_create("requests", pdev, 3508 sizeof(struct net2280_dma), 3509 0 /* no alignment requirements */, 3510 0 /* or page-crossing issues */); 3511 if (!dev->requests) { 3512 ep_dbg(dev, "can't get request pool\n"); 3513 retval = -ENOMEM; 3514 goto done; 3515 } 3516 for (i = 1; i < 5; i++) { 3517 struct net2280_dma *td; 3518 3519 td = pci_pool_alloc(dev->requests, GFP_KERNEL, 3520 &dev->ep[i].td_dma); 3521 if (!td) { 3522 ep_dbg(dev, "can't get dummy %d\n", i); 3523 retval = -ENOMEM; 3524 goto done; 3525 } 3526 td->dmacount = 0; /* not VALID */ 3527 td->dmadesc = td->dmaaddr; 3528 dev->ep[i].dummy = td; 3529 } 3530 3531 /* enable lower-overhead pci memory bursts during DMA */ 3532 if (dev->quirks & PLX_LEGACY) 3533 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) | 3534 /* 3535 * 256 write retries may not be enough... 3536 BIT(PCI_RETRY_ABORT_ENABLE) | 3537 */ 3538 BIT(DMA_READ_MULTIPLE_ENABLE) | 3539 BIT(DMA_READ_LINE_ENABLE), 3540 &dev->pci->pcimstctl); 3541 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */ 3542 pci_set_master(pdev); 3543 pci_try_set_mwi(pdev); 3544 3545 /* ... also flushes any posted pci writes */ 3546 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff; 3547 3548 /* done */ 3549 ep_info(dev, "%s\n", driver_desc); 3550 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n", 3551 pdev->irq, base, dev->chiprev); 3552 ep_info(dev, "version: " DRIVER_VERSION "; %s\n", 3553 dev->enhanced_mode ? "enhanced mode" : "legacy mode"); 3554 retval = device_create_file(&pdev->dev, &dev_attr_registers); 3555 if (retval) 3556 goto done; 3557 3558 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 3559 gadget_release); 3560 if (retval) 3561 goto done; 3562 return 0; 3563 3564 done: 3565 if (dev) 3566 net2280_remove(pdev); 3567 return retval; 3568 } 3569 3570 /* make sure the board is quiescent; otherwise it will continue 3571 * generating IRQs across the upcoming reboot. 
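 * masking both interrupt enable registers and dropping the soft
 * connect takes care of that.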
 */

static void net2280_shutdown(struct pci_dev *pdev)
{
	struct net2280 *dev = pci_get_drvdata(pdev);

	/* disable IRQs */
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* disable the pullup so the host will think we're gone */
	writel(0, &dev->usb->usbctl);
}

/*-------------------------------------------------------------------------*/

static const struct pci_device_id pci_ids[] = { {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX_LEGACY,
	.device = 0x2280,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_LEGACY | PLX_2280,
}, {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX_LEGACY,
	.device = 0x2282,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_LEGACY,
}, {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX,
	.device = 0x3380,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_SUPERSPEED,
}, {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX,
	.device = 0x3382,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_SUPERSPEED,
}, {
	/* end: all zeroes */
} };
MODULE_DEVICE_TABLE(pci, pci_ids);

/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name = (char *) driver_name,
	.id_table = pci_ids,

	.probe = net2280_probe,
	.remove = net2280_remove,
	.shutdown = net2280_shutdown,

	/* FIXME add power management support */
};

module_pci_driver(net2280_pci_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");