/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default.  Drivers using transfer queues might use
 * DMA chaining to remove IRQ latencies between transfers.  (Except when
 * short OUT transfers happen.)  Drivers can use the req->no_interrupt
 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
 * and DMA chaining is enabled.
 *
 * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
 * be enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
 *
 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
 *	with 2282 chip
 *
 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
 *	with usb 338x chip. Based on PLX driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>

#define DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
#define DRIVER_VERSION		"2005 Sept 27/v3.0"

#define EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */


static const char driver_name[] = "net2280";
static const char driver_desc[] = DRIVER_DESC;

static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
        ep0name,
        "ep-a", "ep-b", "ep-c", "ep-d",
        "ep-e", "ep-f", "ep-g", "ep-h",
};

/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
 *
 * The net2280 DMA engines are not tightly integrated with their FIFOs;
 * not all cases are (yet) handled well in this driver or the silicon.
 * Some gadget drivers work better with the dma support here than others.
 * These two parameters let you use PIO or more aggressive DMA.
 */
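/* Illustrative sketch (not part of this driver): with DMA chaining
 * enabled, a gadget driver queueing a burst of requests can suppress
 * completion IRQs on all but the last request:
 *
 *      req->no_interrupt = 1;                  // earlier requests
 *      usb_ep_queue(ep, req, GFP_ATOMIC);
 *      ...
 *      last->no_interrupt = 0;                 // guaranteed final IRQ
 *      usb_ep_queue(ep, last, GFP_ATOMIC);
 */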
static bool use_dma = true;
static bool use_dma_chaining;
static bool use_msi = true;

/* "modprobe net2280 use_dma=n" etc */
module_param(use_dma, bool, 0444);
module_param(use_dma_chaining, bool, 0444);
module_param(use_msi, bool, 0444);

/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode;

/* "modprobe net2280 fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);

/* force full-speed operation */
static bool full_speed;
module_param(full_speed, bool, 0444);
MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");

#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
        switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
        case USB_ENDPOINT_XFER_BULK:    return "bulk";
        case USB_ENDPOINT_XFER_ISOC:    return "iso";
        case USB_ENDPOINT_XFER_INT:     return "intr";
        }
        return "control";
}

#include "net2280.h"

#define valid_bit       cpu_to_le32(BIT(VALID_BIT))
#define dma_done_ie     cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))

/*-------------------------------------------------------------------------*/
static inline void enable_pciirqenb(struct net2280_ep *ep)
{
        u32 tmp = readl(&ep->dev->regs->pciirqenb0);

        if (ep->dev->quirks & PLX_LEGACY)
                tmp |= BIT(ep->num);
        else
                tmp |= BIT(ep_bit[ep->num]);
        writel(tmp, &ep->dev->regs->pciirqenb0);
}

static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
        struct net2280 *dev;
        struct net2280_ep *ep;
        u32 max, tmp;
        unsigned long flags;
        static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
                        desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
        dev = ep->dev;
        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* erratum 0119 workaround ties up an endpoint number */
        if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
                return -EDOM;

        if (dev->quirks & PLX_SUPERSPEED) {
                if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
                        return -EDOM;
                ep->is_in = !!usb_endpoint_dir_in(desc);
                if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
                        return -EINVAL;
        }

        /* sanity check ep-e/ep-f since their fifos are small */
        max = usb_endpoint_maxp(desc) & 0x1fff;
        if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
                return -ERANGE;

        spin_lock_irqsave(&dev->lock, flags);
        _ep->maxpacket = max & 0x7ff;
        ep->desc = desc;

        /* ep_reset() has already been called */
        ep->stopped = 0;
        ep->wedged = 0;
        ep->out_overflow = 0;

        /* set speed-dependent max packet; may kick in high bandwidth */
        set_max_speed(ep, max);
        /* FIFO lines can't go to different packets.  PIO is ok, so
         * use it instead of troublesome (non-bulk) multi-packet DMA.
         */
        if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
                ep_dbg(ep->dev, "%s, no dma for maxpacket %d\n",
                        ep->ep.name, ep->ep.maxpacket);
                ep->dma = NULL;
        }

        /* set type, direction, address; reset fifo counters */
        writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
        tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
        if (tmp == USB_ENDPOINT_XFER_INT) {
                /* erratum 0105 workaround prevents hs NYET */
                if (dev->chiprev == 0100 &&
                                dev->gadget.speed == USB_SPEED_HIGH &&
                                !(desc->bEndpointAddress & USB_DIR_IN))
                        writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
                                &ep->regs->ep_rsp);
        } else if (tmp == USB_ENDPOINT_XFER_BULK) {
                /* catch some particularly blatant driver bugs */
                if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
                    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
                    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
                        spin_unlock_irqrestore(&dev->lock, flags);
                        return -ERANGE;
                }
        }
        ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
        /* Enable this endpoint */
        if (dev->quirks & PLX_LEGACY) {
                tmp <<= ENDPOINT_TYPE;
                tmp |= desc->bEndpointAddress;
                /* default full fifo lines */
                tmp |= (4 << ENDPOINT_BYTE_COUNT);
                tmp |= BIT(ENDPOINT_ENABLE);
                ep->is_in = (tmp & USB_DIR_IN) != 0;
        } else {
                /* In Legacy mode, only OUT endpoints are used */
                if (dev->enhanced_mode && ep->is_in) {
                        tmp <<= IN_ENDPOINT_TYPE;
                        tmp |= BIT(IN_ENDPOINT_ENABLE);
                        /* Not applicable to Legacy */
                        tmp |= BIT(ENDPOINT_DIRECTION);
                } else {
                        tmp <<= OUT_ENDPOINT_TYPE;
                        tmp |= BIT(OUT_ENDPOINT_ENABLE);
                        tmp |= (ep->is_in << ENDPOINT_DIRECTION);
                }

                tmp |= usb_endpoint_num(desc);
                tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
        }

        /* Make sure all the registers are written before ep_rsp */
        wmb();

        /* for OUT transfers, block the rx fifo until a read is posted */
        if (!ep->is_in)
                writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
        else if (!(dev->quirks & PLX_2280)) {
                /* Added for 2282: don't use NAK packets on an IN endpoint;
                 * this was ignored on 2280
                 */
                writel(BIT(CLEAR_NAK_OUT_PACKETS) |
                        BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
        }

        writel(tmp, &ep->cfg->ep_cfg);

        /* enable irqs */
        if (!ep->dma) {                         /* pio, per-packet */
                enable_pciirqenb(ep);

                tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
                        BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
                if (dev->quirks & PLX_2280)
                        tmp |= readl(&ep->regs->ep_irqenb);
                writel(tmp, &ep->regs->ep_irqenb);
        } else {                                /* dma, per-request */
                tmp = BIT((8 + ep->num));       /* completion */
                tmp |= readl(&dev->regs->pciirqenb1);
                writel(tmp, &dev->regs->pciirqenb1);

                /* for short OUT transfers, dma completions can't
                 * advance the queue; do it pio-style, by hand.
                 * NOTE erratum 0112 workaround #2
                 */
                if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
                        tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
                        writel(tmp, &ep->regs->ep_irqenb);

                        enable_pciirqenb(ep);
                }
        }

        tmp = desc->bEndpointAddress;
        ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
                _ep->name, tmp & 0x0f, DIR_STRING(tmp),
                type_string(desc->bmAttributes),
                ep->dma ? "dma" : "pio", max);

        /* pci writes may still be posted */
        spin_unlock_irqrestore(&dev->lock, flags);
        return 0;
}

/* poll until the masked bits at ptr read back as done, or usec expires */
static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = readl(ptr);
                if (result == ~(u32)0)          /* "device unplugged" */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}

static const struct usb_ep_ops net2280_ep_ops;

static void ep_reset_228x(struct net2280_regs __iomem *regs,
                          struct net2280_ep *ep)
{
        u32 tmp;

        ep->desc = NULL;
        INIT_LIST_HEAD(&ep->queue);

        usb_ep_set_maxpacket_limit(&ep->ep, ~0);
        ep->ep.ops = &net2280_ep_ops;

        /* disable the dma, irqs, endpoint... */
        if (ep->dma) {
                writel(0, &ep->dma->dmactl);
                writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
                        BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
                        BIT(DMA_ABORT),
                        &ep->dma->dmastat);

                tmp = readl(&regs->pciirqenb0);
                tmp &= ~BIT(ep->num);
                writel(tmp, &regs->pciirqenb0);
        } else {
                tmp = readl(&regs->pciirqenb1);
                tmp &= ~BIT((8 + ep->num));     /* completion */
                writel(tmp, &regs->pciirqenb1);
        }
        writel(0, &ep->regs->ep_irqenb);

        /* init to our chosen defaults, notably so that we NAK OUT
         * packets until the driver queues a read (+note erratum 0112)
         */
        if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
                tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
                        BIT(SET_NAK_OUT_PACKETS) |
                        BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
                        BIT(CLEAR_INTERRUPT_MODE);
        } else {
                /* added for 2282 */
                tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
                        BIT(CLEAR_NAK_OUT_PACKETS) |
                        BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
                        BIT(CLEAR_INTERRUPT_MODE);
        }

        if (ep->num != 0) {
                tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
                        BIT(CLEAR_ENDPOINT_HALT);
        }
        writel(tmp, &ep->regs->ep_rsp);

        /* scrub most status bits, and flush any fifo state */
        if (ep->dev->quirks & PLX_2280)
                tmp = BIT(FIFO_OVERFLOW) |
                        BIT(FIFO_UNDERFLOW);
        else
                tmp = 0;

        writel(tmp | BIT(TIMEOUT) |
                BIT(USB_STALL_SENT) |
                BIT(USB_IN_NAK_SENT) |
                BIT(USB_IN_ACK_RCVD) |
                BIT(USB_OUT_PING_NAK_SENT) |
                BIT(USB_OUT_ACK_SENT) |
                BIT(FIFO_FLUSH) |
                BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
                BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
                BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
                BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
                BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
                BIT(DATA_IN_TOKEN_INTERRUPT),
                &ep->regs->ep_stat);

        /* fifo size is handled separately */
}
/* 338x flavor of the endpoint reset above; the extra dmastat check
 * below clears, by hand, a leftover DMA status the hardware can report
 * (the driver looks for the value 0x5002).
 */
static void ep_reset_338x(struct net2280_regs __iomem *regs,
                          struct net2280_ep *ep)
{
        u32 tmp, dmastat;

        ep->desc = NULL;
        INIT_LIST_HEAD(&ep->queue);

        usb_ep_set_maxpacket_limit(&ep->ep, ~0);
        ep->ep.ops = &net2280_ep_ops;

        /* disable the dma, irqs, endpoint... */
        if (ep->dma) {
                writel(0, &ep->dma->dmactl);
                writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
                        BIT(DMA_PAUSE_DONE_INTERRUPT) |
                        BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
                        BIT(DMA_TRANSACTION_DONE_INTERRUPT),
                        /* | BIT(DMA_ABORT), */
                        &ep->dma->dmastat);

                dmastat = readl(&ep->dma->dmastat);
                if (dmastat == 0x5002) {
                        ep_warn(ep->dev, "The dmastat return = %x!!\n",
                                dmastat);
                        writel(0x5a, &ep->dma->dmastat);
                }

                tmp = readl(&regs->pciirqenb0);
                tmp &= ~BIT(ep_bit[ep->num]);
                writel(tmp, &regs->pciirqenb0);
        } else {
                if (ep->num < 5) {
                        tmp = readl(&regs->pciirqenb1);
                        tmp &= ~BIT((8 + ep->num));     /* completion */
                        writel(tmp, &regs->pciirqenb1);
                }
        }
        writel(0, &ep->regs->ep_irqenb);

        writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
                BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
                BIT(FIFO_OVERFLOW) |
                BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
                BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
                BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
                BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
}

static void nuke(struct net2280_ep *);

static int net2280_disable(struct usb_ep *_ep)
{
        struct net2280_ep *ep;
        unsigned long flags;

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || !ep->desc || _ep->name == ep0name)
                return -EINVAL;

        spin_lock_irqsave(&ep->dev->lock, flags);
        nuke(ep);

        if (ep->dev->quirks & PLX_SUPERSPEED)
                ep_reset_338x(ep->dev->regs, ep);
        else
                ep_reset_228x(ep->dev->regs, ep);

        ep_vdbg(ep->dev, "disabled %s %s\n",
                ep->dma ? "dma" : "pio", _ep->name);

        /* synch memory views with the device */
        (void)readl(&ep->cfg->ep_cfg);

        if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
                ep->dma = &ep->dev->dma[ep->num - 1];

        spin_unlock_irqrestore(&ep->dev->lock, flags);
        return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct net2280_ep *ep;
        struct net2280_request *req;

        if (!_ep)
                return NULL;
        ep = container_of(_ep, struct net2280_ep, ep);

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        /* this dma descriptor may be swapped with the previous dummy */
        if (ep->dma) {
                struct net2280_dma *td;

                td = pci_pool_alloc(ep->dev->requests, gfp_flags,
                                &req->td_dma);
                if (!td) {
                        kfree(req);
                        return NULL;
                }
                td->dmacount = 0;       /* not VALID */
                td->dmadesc = td->dmaaddr;
                req->td = td;
        }
        return &req->req;
}

static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
        struct net2280_ep *ep;
        struct net2280_request *req;

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || !_req)
                return;

        req = container_of(_req, struct net2280_request, req);
        WARN_ON(!list_empty(&req->queue));
        if (req->td)
                pci_pool_free(ep->dev->requests, req->td, req->td_dma);
        kfree(req);
}

/*-------------------------------------------------------------------------*/
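/* Illustrative request lifecycle from a gadget driver's perspective
 * (sketch only; error handling omitted, my_complete is hypothetical):
 *
 *      struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *      req->buf = buf;
 *      req->length = len;
 *      req->complete = my_complete;
 *      usb_ep_queue(ep, req, GFP_ATOMIC);
 *      ...     // my_complete() runs when the transfer finishes
 *      usb_ep_free_request(ep, req);
 */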
/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
        struct net2280_ep_regs __iomem *regs = ep->regs;
        u8 *buf;
        u32 tmp;
        unsigned count, total;

        /* INVARIANT: fifo is currently empty. (testable) */

        if (req) {
                buf = req->buf + req->actual;
                prefetch(buf);
                total = req->length - req->actual;
        } else {
                total = 0;
                buf = NULL;
        }

        /* write just one packet at a time */
        count = ep->ep.maxpacket;
        if (count > total)      /* min() cannot be used on a bitfield */
                count = total;

        ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
                        ep->ep.name, count,
                        (count != ep->ep.maxpacket) ? " (short)" : "",
                        req);
        while (count >= 4) {
                /* NOTE be careful if you try to align these. fifo lines
                 * should normally be full (4 bytes) and successive partial
                 * lines are ok only in certain cases.
                 */
                tmp = get_unaligned((u32 *)buf);
                cpu_to_le32s(&tmp);
                writel(tmp, &regs->ep_data);
                buf += 4;
                count -= 4;
        }

        /* last fifo entry is "short" unless we wrote a full packet.
         * also explicitly validate last word in (periodic) transfers
         * when maxpacket is not a multiple of 4 bytes.
         */
        if (count || total < ep->ep.maxpacket) {
                tmp = count ? get_unaligned((u32 *)buf) : count;
                cpu_to_le32s(&tmp);
                set_fifo_bytecount(ep, count & 0x03);
                writel(tmp, &regs->ep_data);
        }

        /* pci writes may still be posted */
}

/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE: also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush(struct net2280_ep *ep)
{
        u32 __iomem *statp;
        u32 tmp;

        ASSERT_OUT_NAKING(ep);

        statp = &ep->regs->ep_stat;
        writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
                BIT(DATA_PACKET_RECEIVED_INTERRUPT),
                statp);
        writel(BIT(FIFO_FLUSH), statp);
        /* Make sure that statp is written */
        mb();
        tmp = readl(statp);
        if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
                        /* high speed did bulk NYET; fifo isn't filling */
                        ep->dev->gadget.speed == USB_SPEED_FULL) {
                unsigned usec;

                usec = 50;              /* 64 byte bulk/interrupt */
                handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
                                BIT(USB_OUT_PING_NAK_SENT), usec);
                /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
        }
}
/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
        struct net2280_ep_regs __iomem *regs = ep->regs;
        u8 *buf = req->req.buf + req->req.actual;
        unsigned count, tmp, is_short;
        unsigned cleanup = 0, prevent = 0;

        /* erratum 0106 ... packets coming in during fifo reads might
         * be incompletely rejected.  not all cases have workarounds.
         */
        if (ep->dev->chiprev == 0x0100 &&
                        ep->dev->gadget.speed == USB_SPEED_FULL) {
                udelay(1);
                tmp = readl(&ep->regs->ep_stat);
                if ((tmp & BIT(NAK_OUT_PACKETS)))
                        cleanup = 1;
                else if ((tmp & BIT(FIFO_FULL))) {
                        start_out_naking(ep);
                        prevent = 1;
                }
                /* else: hope we don't see the problem */
        }

        /* never overflow the rx buffer. the fifo reads packets until
         * it sees a short one; we might not be ready for them all.
         */
        prefetchw(buf);
        count = readl(&regs->ep_avail);
        if (unlikely(count == 0)) {
                udelay(1);
                tmp = readl(&ep->regs->ep_stat);
                count = readl(&regs->ep_avail);
                /* handled that data already? */
                if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
                        return 0;
        }

        tmp = req->req.length - req->req.actual;
        if (count > tmp) {
                /* as with DMA, data overflow gets flushed */
                if ((tmp % ep->ep.maxpacket) != 0) {
                        ep_err(ep->dev,
                                "%s out fifo %d bytes, expected %d\n",
                                ep->ep.name, count, tmp);
                        req->req.status = -EOVERFLOW;
                        cleanup = 1;
                        /* NAK_OUT_PACKETS will be set, so flushing is safe;
                         * the next read will start with the next packet
                         */
                } /* else it's a ZLP, no worries */
                count = tmp;
        }
        req->req.actual += count;

        is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

        ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
                        ep->ep.name, count, is_short ? " (short)" : "",
                        cleanup ? " flush" : "", prevent ? " nak" : "",
                        req, req->req.actual, req->req.length);

        while (count >= 4) {
                tmp = readl(&regs->ep_data);
                cpu_to_le32s(&tmp);
                put_unaligned(tmp, (u32 *)buf);
                buf += 4;
                count -= 4;
        }
        if (count) {
                tmp = readl(&regs->ep_data);
                /* LE conversion is implicit here: */
                do {
                        *buf++ = (u8) tmp;
                        tmp >>= 8;
                } while (--count);
        }
        if (cleanup)
                out_flush(ep);
        if (prevent) {
                writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
                (void) readl(&ep->regs->ep_rsp);
        }

        return is_short || ((req->req.actual == req->req.length) &&
                        !req->req.zero);
}
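/* Each request owns one struct net2280_dma descriptor.  A sketch of the
 * resulting scatter/gather chain, assuming the layout net2280.h gives
 * dmacount (byte count in the low bits; VALID/END_OF_CHAIN/IRQ-enable
 * flags in the high bits):
 *
 *      head td --dmadesc--> td --dmadesc--> ... --dmadesc--> dummy td
 *                                               (dummy is not VALID yet)
 *
 * The engine polls the next descriptor's VALID bit (erratum 0116 keeps
 * autostart off), so queueing a request amounts to validating what used
 * to be the dummy; see queue_dma() and fill_dma_desc() below.
 */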
/* fill out dma descriptor to match a given request */
static void fill_dma_desc(struct net2280_ep *ep,
                struct net2280_request *req, int valid)
{
        struct net2280_dma *td = req->td;
        u32 dmacount = req->req.length;

        /* don't let DMA continue after a short OUT packet,
         * so overruns can't affect the next transfer.
         * in case of overruns on max-size packets, we can't
         * stop the fifo from filling but we can flush it.
         */
        if (ep->is_in)
                dmacount |= BIT(DMA_DIRECTION);
        if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
                        !(ep->dev->quirks & PLX_2280))
                dmacount |= BIT(END_OF_CHAIN);

        req->valid = valid;
        if (valid)
                dmacount |= BIT(VALID_BIT);
        if (likely(!req->req.no_interrupt || !use_dma_chaining))
                dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

        /* td->dmadesc = previously set by caller */
        td->dmaaddr = cpu_to_le32(req->req.dma);

        /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
        wmb();
        td->dmacount = cpu_to_le32(dmacount);
}

static const u32 dmactl_default =
                BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
                BIT(DMA_CLEAR_COUNT_ENABLE) |
                /* erratum 0116 workaround part 1 (use POLLING) */
                (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
                BIT(DMA_VALID_BIT_POLLING_ENABLE) |
                BIT(DMA_VALID_BIT_ENABLE) |
                BIT(DMA_SCATTER_GATHER_ENABLE) |
                /* erratum 0116 workaround part 2 (no AUTOSTART) */
                BIT(DMA_ENABLE);

static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
        handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}

static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
        writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
        spin_stop_dma(dma);
}

static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
        struct net2280_dma_regs __iomem *dma = ep->dma;
        unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

        if (!(ep->dev->quirks & PLX_2280))
                tmp |= BIT(END_OF_CHAIN);

        writel(tmp, &dma->dmacount);
        writel(readl(&dma->dmastat), &dma->dmastat);

        writel(td_dma, &dma->dmadesc);
        if (ep->dev->quirks & PLX_SUPERSPEED)
                dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
        writel(dmactl, &dma->dmactl);

        /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
        (void) readl(&ep->dev->pci->pcimstctl);

        writel(BIT(DMA_START), &dma->dmastat);

        if (!ep->is_in)
                stop_out_naking(ep);
}
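/* Kick DMA for the queue head.  Before starting the scatter/gather
 * chain on an OUT endpoint, start_dma() below first drains any bytes a
 * previous short packet left in the fifo, using a plain one-shot DMA
 * with a faked descriptor.
 */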
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
        u32 tmp;
        struct net2280_dma_regs __iomem *dma = ep->dma;

        /* FIXME can't use DMA for ZLPs */

        /* on this path we "know" there's no dma active (yet) */
        WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
        writel(0, &ep->dma->dmactl);

        /* previous OUT packet might have been short */
        if (!ep->is_in && (readl(&ep->regs->ep_stat) &
                                BIT(NAK_OUT_PACKETS))) {
                writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
                        &ep->regs->ep_stat);

                tmp = readl(&ep->regs->ep_avail);
                if (tmp) {
                        writel(readl(&dma->dmastat), &dma->dmastat);

                        /* transfer all/some fifo data */
                        writel(req->req.dma, &dma->dmaaddr);
                        tmp = min(tmp, req->req.length);

                        /* dma irq, faking scatterlist status */
                        req->td->dmacount = cpu_to_le32(req->req.length - tmp);
                        writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
                                &dma->dmacount);
                        req->td->dmadesc = 0;
                        req->valid = 1;

                        writel(BIT(DMA_ENABLE), &dma->dmactl);
                        writel(BIT(DMA_START), &dma->dmastat);
                        return;
                }
        }

        tmp = dmactl_default;

        /* force packet boundaries between dma requests, but prevent the
         * controller from automagically writing a last "short" packet
         * (zero length) unless the driver explicitly said to do that.
         */
        if (ep->is_in) {
                if (likely((req->req.length % ep->ep.maxpacket) ||
                                req->req.zero)) {
                        tmp |= BIT(DMA_FIFO_VALIDATE);
                        ep->in_fifo_validate = 1;
                } else
                        ep->in_fifo_validate = 0;
        }

        /* init req->td, pointing to the current dummy */
        req->td->dmadesc = cpu_to_le32(ep->td_dma);
        fill_dma_desc(ep, req, 1);

        if (!use_dma_chaining)
                req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

        start_queue(ep, tmp, req->td_dma);
}

static inline void resume_dma(struct net2280_ep *ep)
{
        writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl);

        ep->dma_started = true;
}

static inline void ep_stop_dma(struct net2280_ep *ep)
{
        writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl);
        spin_stop_dma(ep->dma);

        ep->dma_started = false;
}

static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
        struct net2280_dma *end;
        dma_addr_t tmp;

        /* swap new dummy for old, link; fill and maybe activate */
        end = ep->dummy;
        ep->dummy = req->td;
        req->td = end;

        tmp = ep->td_dma;
        ep->td_dma = req->td_dma;
        req->td_dma = tmp;

        end->dmadesc = cpu_to_le32(ep->td_dma);

        fill_dma_desc(ep, req, valid);
}

static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
        struct net2280 *dev;
        unsigned stopped = ep->stopped;

        list_del_init(&req->queue);

        if (req->req.status == -EINPROGRESS)
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        if (ep->dma)
                usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

        if (status && status != -ESHUTDOWN)
                ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
                        ep->ep.name, &req->req, status,
                        req->req.actual, req->req.length);

        /* don't modify queue heads during completion callback */
        ep->stopped = 1;
        spin_unlock(&dev->lock);
        usb_gadget_giveback_request(&ep->ep, &req->req);
        spin_lock(&dev->lock);
        ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/
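/* Fast path vs. slow path: when an endpoint's queue is empty and not
 * stopped, net2280_queue() starts i/o immediately (DMA kick or PIO
 * fifo stuffing); otherwise the request is simply appended, and the
 * irq handler or the DMA chaining logic advances the queue later.
 */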
static int
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
        struct net2280_request *req;
        struct net2280_ep *ep;
        struct net2280 *dev;
        unsigned long flags;

        /* we always require a cpu-view buffer, so that we can
         * always use pio (as fallback or whatever).
         */
        req = container_of(_req, struct net2280_request, req);
        if (!_req || !_req->complete || !_req->buf ||
                        !list_empty(&req->queue))
                return -EINVAL;
        if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
                return -EDOM;
        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || (!ep->desc && ep->num != 0))
                return -EINVAL;
        dev = ep->dev;
        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* FIXME implement PIO fallback for ZLPs with DMA */
        if (ep->dma && _req->length == 0)
                return -EOPNOTSUPP;

        /* set up dma mapping in case the caller didn't */
        if (ep->dma) {
                int ret;

                ret = usb_gadget_map_request(&dev->gadget, _req,
                                ep->is_in);
                if (ret)
                        return ret;
        }

#if 0
        ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
                        _ep->name, _req, _req->length, _req->buf);
#endif

        spin_lock_irqsave(&dev->lock, flags);

        _req->status = -EINPROGRESS;
        _req->actual = 0;

        /* kickstart this i/o queue? */
        if (list_empty(&ep->queue) && !ep->stopped) {
                /* DMA request while EP halted */
                if (ep->dma &&
                    (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
                    (dev->quirks & PLX_SUPERSPEED)) {
                        int valid = 1;
                        if (ep->is_in) {
                                int expect;
                                expect = likely(req->req.zero ||
                                                ((req->req.length %
                                                  ep->ep.maxpacket) != 0));
                                if (expect != ep->in_fifo_validate)
                                        valid = 0;
                        }
                        queue_dma(ep, req, valid);
                }
                /* use DMA if the endpoint supports it, else pio */
                else if (ep->dma)
                        start_dma(ep, req);
                else {
                        /* maybe there's no control data, just status ack */
                        if (ep->num == 0 && _req->length == 0) {
                                allow_status(ep);
                                done(ep, req, 0);
                                ep_vdbg(dev, "%s status ack\n", ep->ep.name);
                                goto done;
                        }

                        /* PIO ... stuff the fifo, or unblock it. */
                        if (ep->is_in)
                                write_fifo(ep, _req);
                        else if (list_empty(&ep->queue)) {
                                u32 s;

                                /* OUT FIFO might have packet(s) buffered */
                                s = readl(&ep->regs->ep_stat);
                                if ((s & BIT(FIFO_EMPTY)) == 0) {
                                        /* note: _req->short_not_ok is
                                         * ignored here since PIO _always_
                                         * stops queue advance here, and
                                         * _req->status doesn't change for
                                         * short reads (only _req->actual)
                                         */
                                        if (read_fifo(ep, req)) {
                                                done(ep, req, 0);
                                                if (ep->num == 0)
                                                        allow_status(ep);
                                                /* don't queue it */
                                                req = NULL;
                                        } else
                                                s = readl(&ep->regs->ep_stat);
                                }

                                /* don't NAK, let the fifo fill */
                                if (req && (s & BIT(NAK_OUT_PACKETS)))
                                        writel(BIT(CLEAR_NAK_OUT_PACKETS),
                                                        &ep->regs->ep_rsp);
                        }
                }

        } else if (ep->dma) {
                int valid = 1;

                if (ep->is_in) {
                        int expect;

                        /* preventing magic zlps is per-engine state, not
                         * per-transfer; irq logic must recover hiccups.
                         */
                        expect = likely(req->req.zero ||
                                        (req->req.length % ep->ep.maxpacket));
                        if (expect != ep->in_fifo_validate)
                                valid = 0;
                }
                queue_dma(ep, req, valid);

        } /* else the irq handler advances the queue */
        ep->responded = 1;
        if (req)
                list_add_tail(&req->queue, &ep->queue);
done:
        spin_unlock_irqrestore(&dev->lock, flags);

        /* pci writes may still be posted */
        return 0;
}

static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
                int status)
{
        /* the descriptor's dmacount low bits hold the residue */
        req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
        done(ep, req, status);
}

static void restart_dma(struct net2280_ep *ep);

static void scan_dma_completions(struct net2280_ep *ep)
{
        /* only look at descriptors that were "naturally" retired,
         * so fifo and list head state won't matter
         */
        while (!list_empty(&ep->queue)) {
                struct net2280_request *req;
                u32 tmp;

                req = list_entry(ep->queue.next,
                                struct net2280_request, queue);
                if (!req->valid)
                        break;
                rmb();
                tmp = le32_to_cpup(&req->td->dmacount);
                if ((tmp & BIT(VALID_BIT)) != 0)
                        break;

                /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
                 * cases where DMA must be aborted; this code handles
                 * all non-abort DMA completions.
                 */
                if (unlikely(req->td->dmadesc == 0)) {
                        /* paranoia */
                        tmp = readl(&ep->dma->dmacount);
                        if (tmp & DMA_BYTE_COUNT_MASK)
                                break;
                        /* single transfer mode */
                        dma_done(ep, req, tmp, 0);
                        break;
                } else if (!ep->is_in &&
                                (req->req.length % ep->ep.maxpacket) != 0) {
                        if (ep->dev->quirks & PLX_SUPERSPEED)
                                return dma_done(ep, req, tmp, 0);

                        tmp = readl(&ep->regs->ep_stat);
                        /* AVOID TROUBLE HERE by not issuing short reads from
                         * your gadget driver.  That helps avoid errata 0121,
                         * 0122, and 0124; not all cases trigger the warning.
                         */
                        if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
                                ep_warn(ep->dev, "%s lost packet sync!\n",
                                                ep->ep.name);
                                req->req.status = -EOVERFLOW;
                        } else {
                                tmp = readl(&ep->regs->ep_avail);
                                if (tmp) {
                                        /* fifo gets flushed later */
                                        ep->out_overflow = 1;
                                        ep_dbg(ep->dev,
                                                "%s dma, discard %d len %d\n",
                                                ep->ep.name, tmp,
                                                req->req.length);
                                        req->req.status = -EOVERFLOW;
                                }
                        }
                }
                dma_done(ep, req, tmp, 0);
        }
}
static void restart_dma(struct net2280_ep *ep)
{
        struct net2280_request *req;
        u32 dmactl = dmactl_default;

        if (ep->stopped)
                return;
        req = list_entry(ep->queue.next, struct net2280_request, queue);

        if (!use_dma_chaining) {
                start_dma(ep, req);
                return;
        }

        /* the 2280 will be processing the queue unless queue hiccups after
         * the previous transfer:
         *  IN:   wanted automagic zlp, head doesn't (or vice versa)
         *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
         *  OUT:  was "usb-short", we must restart.
         */
        if (ep->is_in && !req->valid) {
                struct net2280_request *entry, *prev = NULL;
                int reqmode, done = 0;

                ep_dbg(ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
                ep->in_fifo_validate = likely(req->req.zero ||
                                (req->req.length % ep->ep.maxpacket) != 0);
                if (ep->in_fifo_validate)
                        dmactl |= BIT(DMA_FIFO_VALIDATE);
                list_for_each_entry(entry, &ep->queue, queue) {
                        __le32 dmacount;

                        if (entry == req)
                                continue;
                        dmacount = entry->td->dmacount;
                        if (!done) {
                                reqmode = likely(entry->req.zero ||
                                                (entry->req.length %
                                                 ep->ep.maxpacket));
                                if (reqmode == ep->in_fifo_validate) {
                                        entry->valid = 1;
                                        dmacount |= valid_bit;
                                        entry->td->dmacount = dmacount;
                                        prev = entry;
                                        continue;
                                } else {
                                        /* force a hiccup */
                                        prev->td->dmacount |= dma_done_ie;
                                        done = 1;
                                }
                        }

                        /* walk the rest of the queue so unlinks behave */
                        entry->valid = 0;
                        dmacount &= ~valid_bit;
                        entry->td->dmacount = dmacount;
                        prev = entry;
                }
        }

        writel(0, &ep->dma->dmactl);
        start_queue(ep, dmactl, req->td_dma);
}

static void abort_dma_228x(struct net2280_ep *ep)
{
        /* abort the current transfer */
        if (likely(!list_empty(&ep->queue))) {
                /* FIXME work around errata 0121, 0122, 0124 */
                writel(BIT(DMA_ABORT), &ep->dma->dmastat);
                spin_stop_dma(ep->dma);
        } else
                stop_dma(ep->dma);
        scan_dma_completions(ep);
}

static void abort_dma_338x(struct net2280_ep *ep)
{
        writel(BIT(DMA_ABORT), &ep->dma->dmastat);
        spin_stop_dma(ep->dma);
}

static void abort_dma(struct net2280_ep *ep)
{
        if (ep->dev->quirks & PLX_LEGACY)
                return abort_dma_228x(ep);
        return abort_dma_338x(ep);
}

/* dequeue ALL requests */
static void nuke(struct net2280_ep *ep)
{
        struct net2280_request *req;

        /* called with spinlock held */
        ep->stopped = 1;
        if (ep->dma)
                abort_dma(ep);
        while (!list_empty(&ep->queue)) {
                req = list_entry(ep->queue.next,
                                struct net2280_request,
                                queue);
                done(ep, req, -ESHUTDOWN);
        }
}
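/* Note for usb_ep_dequeue() callers: a successfully unlinked request
 * completes with status -ECONNRESET.
 */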
/* dequeue JUST ONE request */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
        struct net2280_ep *ep;
        struct net2280_request *req;
        unsigned long flags;
        u32 dmactl;
        int stopped;

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || (!ep->desc && ep->num != 0) || !_req)
                return -EINVAL;

        spin_lock_irqsave(&ep->dev->lock, flags);
        stopped = ep->stopped;

        /* quiesce dma while we patch the queue */
        dmactl = 0;
        ep->stopped = 1;
        if (ep->dma) {
                dmactl = readl(&ep->dma->dmactl);
                /* WARNING erratum 0127 may kick in ... */
                stop_dma(ep->dma);
                scan_dma_completions(ep);
        }

        /* make sure it's still queued on this endpoint */
        list_for_each_entry(req, &ep->queue, queue) {
                if (&req->req == _req)
                        break;
        }
        if (&req->req != _req) {
                spin_unlock_irqrestore(&ep->dev->lock, flags);
                return -EINVAL;
        }

        /* queue head may be partially complete. */
        if (ep->queue.next == &req->queue) {
                if (ep->dma) {
                        ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
                        _req->status = -ECONNRESET;
                        abort_dma(ep);
                        if (likely(ep->queue.next == &req->queue)) {
                                /* NOTE: misreports single-transfer mode */
                                req->td->dmacount = 0;  /* invalidate */
                                dma_done(ep, req,
                                        readl(&ep->dma->dmacount),
                                        -ECONNRESET);
                        }
                } else {
                        ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
                        done(ep, req, -ECONNRESET);
                }
                req = NULL;

        /* patch up hardware chaining data */
        } else if (ep->dma && use_dma_chaining) {
                if (req->queue.prev == ep->queue.next) {
                        writel(le32_to_cpu(req->td->dmadesc),
                                &ep->dma->dmadesc);
                        if (req->td->dmacount & dma_done_ie)
                                writel(readl(&ep->dma->dmacount) |
                                        le32_to_cpu(dma_done_ie),
                                        &ep->dma->dmacount);
                } else {
                        struct net2280_request *prev;

                        prev = list_entry(req->queue.prev,
                                struct net2280_request, queue);
                        prev->td->dmadesc = req->td->dmadesc;
                        if (req->td->dmacount & dma_done_ie)
                                prev->td->dmacount |= dma_done_ie;
                }
        }

        if (req)
                done(ep, req, -ECONNRESET);
        ep->stopped = stopped;

        if (ep->dma) {
                /* turn off dma on inactive queues */
                if (list_empty(&ep->queue))
                        stop_dma(ep->dma);
                else if (!ep->stopped) {
                        /* resume current request, or start new one */
                        if (req)
                                writel(dmactl, &ep->dma->dmactl);
                        else
                                start_dma(ep, list_entry(ep->queue.next,
                                        struct net2280_request, queue));
                }
        }

        spin_unlock_irqrestore(&ep->dev->lock, flags);
        return 0;
}

/*-------------------------------------------------------------------------*/

static int net2280_fifo_status(struct usb_ep *_ep);

/* "halt" stalls the endpoint until cleared; "wedge" is a halt that
 * additionally ignores the host's CLEAR_FEATURE(ENDPOINT_HALT), per
 * the usb_ep_set_wedge() contract.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
        struct net2280_ep *ep;
        unsigned long flags;
        int retval = 0;

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || (!ep->desc && ep->num != 0))
                return -EINVAL;
        if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
        if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
                                                == USB_ENDPOINT_XFER_ISOC)
                return -EINVAL;

        spin_lock_irqsave(&ep->dev->lock, flags);
        if (!list_empty(&ep->queue))
                retval = -EAGAIN;
        else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
                retval = -EAGAIN;
        else {
                ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
                                value ? "set" : "clear",
                                wedged ? "wedge" : "halt");
                /* set/clear, then synch memory views with the device */
                if (value) {
                        if (ep->num == 0)
                                ep->dev->protocol_stall = 1;
                        else
                                set_halt(ep);
                        if (wedged)
                                ep->wedged = 1;
                } else {
                        clear_halt(ep);
                        if (ep->dev->quirks & PLX_SUPERSPEED &&
                                        !list_empty(&ep->queue) && ep->td_dma)
                                restart_dma(ep);
                        ep->wedged = 0;
                }
                (void) readl(&ep->regs->ep_rsp);
        }
        spin_unlock_irqrestore(&ep->dev->lock, flags);

        return retval;
}

static int net2280_set_halt(struct usb_ep *_ep, int value)
{
        return net2280_set_halt_and_wedge(_ep, value, 0);
}

static int net2280_set_wedge(struct usb_ep *_ep)
{
        if (!_ep || _ep->name == ep0name)
                return -EINVAL;
        return net2280_set_halt_and_wedge(_ep, 1, 1);
}

static int net2280_fifo_status(struct usb_ep *_ep)
{
        struct net2280_ep *ep;
        u32 avail;

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || (!ep->desc && ep->num != 0))
                return -ENODEV;
        if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
        if (avail > ep->fifo_size)
                return -EOVERFLOW;
        if (ep->is_in)
                avail = ep->fifo_size - avail;
        return avail;
}

static void net2280_fifo_flush(struct usb_ep *_ep)
{
        struct net2280_ep *ep;

        ep = container_of(_ep, struct net2280_ep, ep);
        if (!_ep || (!ep->desc && ep->num != 0))
                return;
        if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
                return;

        writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
        (void) readl(&ep->regs->ep_rsp);
}

static const struct usb_ep_ops net2280_ep_ops = {
        .enable         = net2280_enable,
        .disable        = net2280_disable,

        .alloc_request  = net2280_alloc_request,
        .free_request   = net2280_free_request,

        .queue          = net2280_queue,
        .dequeue        = net2280_dequeue,

        .set_halt       = net2280_set_halt,
        .set_wedge      = net2280_set_wedge,
        .fifo_status    = net2280_fifo_status,
        .fifo_flush     = net2280_fifo_flush,
};

/*-------------------------------------------------------------------------*/

static int net2280_get_frame(struct usb_gadget *_gadget)
{
        struct net2280 *dev;
        unsigned long flags;
        u16 retval;

        if (!_gadget)
                return -ENODEV;
        dev = container_of(_gadget, struct net2280, gadget);
        spin_lock_irqsave(&dev->lock, flags);
        retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
        spin_unlock_irqrestore(&dev->lock, flags);
        return retval;
}
/* Ask the host to resume the bus.  GENERATE_RESUME only fires if the
 * host previously armed remote wakeup via
 * SET_FEATURE(DEVICE_REMOTE_WAKEUP); otherwise the request is silently
 * ignored, as the USB spec requires.
 */
static int net2280_wakeup(struct usb_gadget *_gadget)
{
        struct net2280 *dev;
        u32 tmp;
        unsigned long flags;

        if (!_gadget)
                return 0;
        dev = container_of(_gadget, struct net2280, gadget);

        spin_lock_irqsave(&dev->lock, flags);
        tmp = readl(&dev->usb->usbctl);
        if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
                writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
        spin_unlock_irqrestore(&dev->lock, flags);

        /* pci writes may still be posted */
        return 0;
}

static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
        struct net2280 *dev;
        u32 tmp;
        unsigned long flags;

        if (!_gadget)
                return 0;
        dev = container_of(_gadget, struct net2280, gadget);

        spin_lock_irqsave(&dev->lock, flags);
        tmp = readl(&dev->usb->usbctl);
        if (value) {
                tmp |= BIT(SELF_POWERED_STATUS);
                dev->selfpowered = 1;
        } else {
                tmp &= ~BIT(SELF_POWERED_STATUS);
                dev->selfpowered = 0;
        }
        writel(tmp, &dev->usb->usbctl);
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
        struct net2280 *dev;
        u32 tmp;
        unsigned long flags;

        if (!_gadget)
                return -ENODEV;
        dev = container_of(_gadget, struct net2280, gadget);

        spin_lock_irqsave(&dev->lock, flags);
        tmp = readl(&dev->usb->usbctl);
        dev->softconnect = (is_on != 0);
        if (is_on)
                tmp |= BIT(USB_DETECT_ENABLE);
        else
                tmp &= ~BIT(USB_DETECT_ENABLE);
        writel(tmp, &dev->usb->usbctl);
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

static int net2280_start(struct usb_gadget *_gadget,
                struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);

static const struct usb_gadget_ops net2280_ops = {
        .get_frame      = net2280_get_frame,
        .wakeup         = net2280_wakeup,
        .set_selfpowered = net2280_set_selfpowered,
        .pullup         = net2280_pullup,
        .udc_start      = net2280_start,
        .udc_stop       = net2280_stop,
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
                             char *buf)
{
        struct net2280 *dev = dev_get_drvdata(_dev);

        if (!dev->driver || !dev->driver->function ||
                        strlen(dev->driver->function) > PAGE_SIZE)
                return 0;
        return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);

static ssize_t registers_show(struct device *_dev,
                              struct device_attribute *attr, char *buf)
{
        struct net2280 *dev;
        char *next;
        unsigned size, t;
        unsigned long flags;
        int i;
        u32 t1, t2;
        const char *s;

        dev = dev_get_drvdata(_dev);
        next = buf;
        size = PAGE_SIZE;
        spin_lock_irqsave(&dev->lock, flags);

        if (dev->driver)
                s = dev->driver->driver.name;
        else
                s = "(none)";

        /* Main Control Registers */
        t = scnprintf(next, size, "%s version " DRIVER_VERSION
                        ", chiprev %04x, dma %s\n\n"
                        "devinit %03x fifoctl %08x gadget '%s'\n"
                        "pci irqenb0 %02x irqenb1 %08x "
                        "irqstat0 %04x irqstat1 %08x\n",
                        driver_name, dev->chiprev,
                        use_dma
                                ? (use_dma_chaining ? "chaining" : "enabled")
                                : "disabled",
                        readl(&dev->regs->devinit),
                        readl(&dev->regs->fifoctl),
                        s,
                        readl(&dev->regs->pciirqenb0),
                        readl(&dev->regs->pciirqenb1),
                        readl(&dev->regs->irqstat0),
                        readl(&dev->regs->irqstat1));
        size -= t;
        next += t;

        /* USB Control Registers */
        t1 = readl(&dev->usb->usbctl);
        t2 = readl(&dev->usb->usbstat);
        if (t1 & BIT(VBUS_PIN)) {
                if (t2 & BIT(HIGH_SPEED))
                        s = "high speed";
                else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
                        s = "powered";
                else
                        s = "full speed";
                /* full speed bit (6) not working?? */
        } else
                s = "not attached";
        t = scnprintf(next, size,
                        "stdrsp %08x usbctl %08x usbstat %08x "
                        "addr 0x%02x (%s)\n",
                        readl(&dev->usb->stdrsp), t1, t2,
                        readl(&dev->usb->ouraddr), s);
        size -= t;
        next += t;

        /* PCI Master Control Registers */

        /* DMA Control Registers */

        /* Configurable EP Control Registers */
        for (i = 0; i < dev->n_ep; i++) {
                struct net2280_ep *ep;

                ep = &dev->ep[i];
                if (i && !ep->desc)
                        continue;

                t1 = readl(&ep->cfg->ep_cfg);
                t2 = readl(&ep->regs->ep_rsp) & 0xff;
                t = scnprintf(next, size,
                                "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
                                "irqenb %02x\n",
                                ep->ep.name, t1, t2,
                                (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
                                        ? "NAK " : "",
                                (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
                                        ? "hide " : "",
                                (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
                                        ? "CRC " : "",
                                (t2 & BIT(CLEAR_INTERRUPT_MODE))
                                        ? "interrupt " : "",
                                (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
                                        ? "status " : "",
                                (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
                                        ? "NAKmode " : "",
                                (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
                                        ? "DATA1 " : "DATA0 ",
                                (t2 & BIT(CLEAR_ENDPOINT_HALT))
                                        ? "HALT " : "",
                                readl(&ep->regs->ep_irqenb));
                size -= t;
                next += t;

                t = scnprintf(next, size,
                                "\tstat %08x avail %04x "
                                "(ep%d%s-%s)%s\n",
                                readl(&ep->regs->ep_stat),
                                readl(&ep->regs->ep_avail),
                                t1 & 0x0f, DIR_STRING(t1),
                                type_string(t1 >> 8),
                                ep->stopped ? "*" : "");
                size -= t;
                next += t;

                if (!ep->dma)
                        continue;

                t = scnprintf(next, size,
                                "  dma\tctl %08x stat %08x count %08x\n"
                                "\taddr %08x desc %08x\n",
                                readl(&ep->dma->dmactl),
                                readl(&ep->dma->dmastat),
                                readl(&ep->dma->dmacount),
                                readl(&ep->dma->dmaaddr),
                                readl(&ep->dma->dmadesc));
                size -= t;
                next += t;
        }

        /* Indexed Registers (none yet) */

        /* Statistics */
        t = scnprintf(next, size, "\nirqs:  ");
        size -= t;
        next += t;
        for (i = 0; i < dev->n_ep; i++) {
                struct net2280_ep *ep;

                ep = &dev->ep[i];
                if (i && !ep->irqs)
                        continue;
                t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
                size -= t;
                next += t;
        }
        t = scnprintf(next, size, "\n");
        size -= t;
        next += t;

        spin_unlock_irqrestore(&dev->lock, flags);

        return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);

static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
                           char *buf)
{
        struct net2280 *dev;
        char *next;
        unsigned size;
        unsigned long flags;
        int i;

        dev = dev_get_drvdata(_dev);
        next = buf;
        size = PAGE_SIZE;
        spin_lock_irqsave(&dev->lock, flags);

        for (i = 0; i < dev->n_ep; i++) {
                struct net2280_ep *ep = &dev->ep[i];
                struct net2280_request *req;
                int t;

                if (i != 0) {
                        const struct usb_endpoint_descriptor *d;

                        d = ep->desc;
                        if (!d)
                                continue;
                        t = d->bEndpointAddress;
                        t = scnprintf(next, size,
                                "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
                                ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
                                (t & USB_DIR_IN) ? "in" : "out",
                                type_string(d->bmAttributes),
                                usb_endpoint_maxp(d) & 0x1fff,
                                ep->dma ? "dma" : "pio", ep->fifo_size
                                );
                } else /* ep0 should only have one transfer queued */
                        t = scnprintf(next, size, "ep0 max 64 pio %s\n",
                                        ep->is_in ? "in" : "out");
                if (t <= 0 || t > size)
                        goto done;
                size -= t;
                next += t;

                if (list_empty(&ep->queue)) {
                        t = scnprintf(next, size, "\t(nothing queued)\n");
                        if (t <= 0 || t > size)
                                goto done;
                        size -= t;
                        next += t;
                        continue;
                }
                list_for_each_entry(req, &ep->queue, queue) {
                        if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
                                t = scnprintf(next, size,
                                        "\treq %p len %d/%d "
                                        "buf %p (dmacount %08x)\n",
                                        &req->req, req->req.actual,
                                        req->req.length, req->req.buf,
                                        readl(&ep->dma->dmacount));
                        else
                                t = scnprintf(next, size,
                                        "\treq %p len %d/%d buf %p\n",
                                        &req->req, req->req.actual,
                                        req->req.length, req->req.buf);
                        if (t <= 0 || t > size)
                                goto done;
                        size -= t;
                        next += t;

                        if (ep->dma) {
                                struct net2280_dma *td;

                                td = req->td;
                                t = scnprintf(next, size, "\t    td %08x "
                                        " count %08x buf %08x desc %08x\n",
                                        (u32) req->td_dma,
                                        le32_to_cpu(td->dmacount),
                                        le32_to_cpu(td->dmaaddr),
                                        le32_to_cpu(td->dmadesc));
                                if (t <= 0 || t > size)
                                        goto done;
                                size -= t;
                                next += t;
                        }
                }
        }

done:
        spin_unlock_irqrestore(&dev->lock, flags);
        return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);


#else

#define device_create_file(a, b)        (0)
#define device_remove_file(a, b)        do { } while (0)

#endif

/*-------------------------------------------------------------------------*/

/* another driver-specific mode might be a request type doing dma
 * to/from another device fifo instead of to/from memory.
 */

static void set_fifo_mode(struct net2280 *dev, int mode)
{
        /* keeping high bits preserves BAR2 */
        writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

        /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
        INIT_LIST_HEAD(&dev->gadget.ep_list);
        list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
        list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
        switch (mode) {
        case 0:
                list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
                list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
                dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
                break;
        case 1:
                dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
                break;
        case 2:
                list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
                dev->ep[1].fifo_size = 2048;
                dev->ep[2].fifo_size = 1024;
                break;
        }
        /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
        list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
        list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
}
/* USB338x Defect 7374: SuperSpeed enumeration needs the data endpoints
 * brought up in a specific sequence around the first SuperSpeed control
 * read; the two helpers below implement the "disable" and "enable"
 * phases of that workaround.
 */
static void defect7374_disable_data_eps(struct net2280 *dev)
{
        /*
         * For Defect 7374, disable data EPs (and more):
         * - This phase undoes the earlier phase of the Defect 7374 workaround,
         *   returning ep regs back to normal.
         */
        struct net2280_ep *ep;
        int i;
        unsigned char ep_sel;
        u32 tmp_reg;

        for (i = 1; i < 5; i++) {
                ep = &dev->ep[i];
                writel(0, &ep->cfg->ep_cfg);
        }

        /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
        for (i = 0; i < 6; i++)
                writel(0, &dev->dep[i].dep_cfg);

        for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
                /* Select an endpoint for subsequent operations: */
                tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
                writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

                if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
                                ep_sel == 18 || ep_sel == 20)
                        continue;

                /* Change settings on some selected endpoints */
                tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
                tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
                writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
                tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
                tmp_reg |= BIT(EP_INITIALIZED);
                writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
        }
}

static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
        u32 tmp = 0, tmp_reg;
        u32 fsmvalue, scratch;
        int i;
        unsigned char ep_sel;

        scratch = get_idx_reg(dev->regs, SCRATCH);
        fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
        scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

        /* See if firmware needs to set up for workaround */
        if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
                ep_warn(dev, "Operate Defect 7374 workaround soft this time");
                ep_warn(dev, "It will operate on cold-reboot and SS connect");

                /* GPEPs: */
                tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
                       (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
                       ((dev->enhanced_mode) ?
                        BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
                       BIT(IN_ENDPOINT_ENABLE));

                for (i = 1; i < 5; i++)
                        writel(tmp, &dev->ep[i].cfg->ep_cfg);

                /* CSRIN, PCIIN, STATIN, RCIN */
                tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
                writel(tmp, &dev->dep[1].dep_cfg);
                writel(tmp, &dev->dep[3].dep_cfg);
                writel(tmp, &dev->dep[4].dep_cfg);
                writel(tmp, &dev->dep[5].dep_cfg);

                /* Implemented for development and debug.
                 * Can be refined/tuned later.
                 */
                for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
                        /* Select an endpoint for subsequent operations: */
                        tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
                        writel(((tmp_reg & ~0x1f) | ep_sel),
                                        &dev->plregs->pl_ep_ctrl);

                        if (ep_sel == 1) {
                                tmp =
                                   (readl(&dev->plregs->pl_ep_ctrl) |
                                        BIT(CLEAR_ACK_ERROR_CODE) | 0);
                                writel(tmp, &dev->plregs->pl_ep_ctrl);
                                continue;
                        }

                        if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
                                        ep_sel == 18 || ep_sel == 20)
                                continue;

                        tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
                                BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
                        writel(tmp, &dev->plregs->pl_ep_cfg_4);

                        tmp = readl(&dev->plregs->pl_ep_ctrl) &
                                ~BIT(EP_INITIALIZED);
                        writel(tmp, &dev->plregs->pl_ep_ctrl);
                }

                /* Set FSM to focus on the first Control Read:
                 * - Tip: Connection speed is known upon the first
                 *   setup request.
                 */
                scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
                set_idx_reg(dev->regs, SCRATCH, scratch);

        } else {
                ep_warn(dev, "Defect 7374 workaround soft will NOT operate");
                ep_warn(dev, "It will operate on cold-reboot and SS connect");
        }
}

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 *
 * most of the work to support multiple net2280 controllers would
 * be to associate this gadget driver (yes?) with all of them, or
 * perhaps to bind specific drivers to specific devices.
 */

static void usb_reset_228x(struct net2280 *dev)
{
        u32 tmp;

        dev->gadget.speed = USB_SPEED_UNKNOWN;
        (void) readl(&dev->usb->usbctl);

        net2280_led_init(dev);

        /* disable automatic responses, and irqs */
        writel(0, &dev->usb->stdrsp);
        writel(0, &dev->regs->pciirqenb0);
        writel(0, &dev->regs->pciirqenb1);

        /* clear old dma and irq state */
        for (tmp = 0; tmp < 4; tmp++) {
                struct net2280_ep *ep = &dev->ep[tmp + 1];
                if (ep->dma)
                        abort_dma(ep);
        }

        writel(~0, &dev->regs->irqstat0);
        writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);

        /* reset, and enable pci */
        tmp = readl(&dev->regs->devinit) |
                BIT(PCI_ENABLE) |
                BIT(FIFO_SOFT_RESET) |
                BIT(USB_SOFT_RESET) |
                BIT(M8051_RESET);
        writel(tmp, &dev->regs->devinit);

        /* standard fifo and endpoint allocations */
        set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
}
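/* The 338x flavor below leaves the automatic responses and irq enables
 * untouched while the Defect 7374 workaround FSM is still pending, and
 * only performs the full soft reset once that FSM has reached the
 * "control read" state.
 */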
fifo_mode : 0); 2031 } 2032 2033 static void usb_reset_338x(struct net2280 *dev) 2034 { 2035 u32 tmp; 2036 u32 fsmvalue; 2037 2038 dev->gadget.speed = USB_SPEED_UNKNOWN; 2039 (void)readl(&dev->usb->usbctl); 2040 2041 net2280_led_init(dev); 2042 2043 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 2044 (0xf << DEFECT7374_FSM_FIELD); 2045 2046 /* See if firmware needs to set up for workaround: */ 2047 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { 2048 ep_info(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__, 2049 fsmvalue); 2050 } else { 2051 /* disable automatic responses, and irqs */ 2052 writel(0, &dev->usb->stdrsp); 2053 writel(0, &dev->regs->pciirqenb0); 2054 writel(0, &dev->regs->pciirqenb1); 2055 } 2056 2057 /* clear old dma and irq state */ 2058 for (tmp = 0; tmp < 4; tmp++) { 2059 struct net2280_ep *ep = &dev->ep[tmp + 1]; 2060 2061 if (ep->dma) 2062 abort_dma(ep); 2063 } 2064 2065 writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1); 2066 2067 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) { 2068 /* reset, and enable pci */ 2069 tmp = readl(&dev->regs->devinit) | 2070 BIT(PCI_ENABLE) | 2071 BIT(FIFO_SOFT_RESET) | 2072 BIT(USB_SOFT_RESET) | 2073 BIT(M8051_RESET); 2074 2075 writel(tmp, &dev->regs->devinit); 2076 } 2077 2078 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */ 2079 INIT_LIST_HEAD(&dev->gadget.ep_list); 2080 2081 for (tmp = 1; tmp < dev->n_ep; tmp++) 2082 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); 2083 2084 } 2085 2086 static void usb_reset(struct net2280 *dev) 2087 { 2088 if (dev->quirks & PLX_LEGACY) 2089 return usb_reset_228x(dev); 2090 return usb_reset_338x(dev); 2091 } 2092 2093 static void usb_reinit_228x(struct net2280 *dev) 2094 { 2095 u32 tmp; 2096 int init_dma; 2097 2098 /* use_dma changes are ignored till next device re-init */ 2099 init_dma = use_dma; 2100 2101 /* basic endpoint init */ 2102 for (tmp = 0; tmp < 7; tmp++) { 2103 struct net2280_ep *ep = &dev->ep[tmp]; 2104 2105 ep->ep.name = ep_name[tmp]; 2106 ep->dev = dev; 2107 ep->num = tmp; 2108 2109 if (tmp > 0 && tmp <= 4) { 2110 ep->fifo_size = 1024; 2111 if (init_dma) 2112 ep->dma = &dev->dma[tmp - 1]; 2113 } else 2114 ep->fifo_size = 64; 2115 ep->regs = &dev->epregs[tmp]; 2116 ep->cfg = &dev->epregs[tmp]; 2117 ep_reset_228x(dev->regs, ep); 2118 } 2119 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 2120 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64); 2121 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64); 2122 2123 dev->gadget.ep0 = &dev->ep[0].ep; 2124 dev->ep[0].stopped = 0; 2125 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2126 2127 /* we want to prevent lowlevel/insecure access from the USB host, 2128 * but erratum 0119 means this enable bit is ignored 2129 */ 2130 for (tmp = 0; tmp < 5; tmp++) 2131 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg); 2132 } 2133 2134 static void usb_reinit_338x(struct net2280 *dev) 2135 { 2136 int init_dma; 2137 int i; 2138 u32 tmp, val; 2139 u32 fsmvalue; 2140 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 }; 2141 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00, 2142 0x00, 0xC0, 0x00, 0xC0 }; 2143 2144 /* use_dma changes are ignored till next device re-init */ 2145 init_dma = use_dma; 2146 2147 /* basic endpoint init */ 2148 for (i = 0; i < dev->n_ep; i++) { 2149 struct net2280_ep *ep = &dev->ep[i]; 2150 2151 ep->ep.name = ep_name[i]; 2152 ep->dev = dev; 2153 ep->num = i; 2154 2155 if (i > 0 && i <= 4 && init_dma) 2156 ep->dma = &dev->dma[i - 1]; 2157 2158 if (dev->enhanced_mode) { 2159 ep->cfg = 
&dev->epregs[ne[i]]; 2160 ep->regs = (struct net2280_ep_regs __iomem *) 2161 (((void __iomem *)&dev->epregs[ne[i]]) + 2162 ep_reg_addr[i]); 2163 ep->fiforegs = &dev->fiforegs[i]; 2164 } else { 2165 ep->cfg = &dev->epregs[i]; 2166 ep->regs = &dev->epregs[i]; 2167 ep->fiforegs = &dev->fiforegs[i]; 2168 } 2169 2170 ep->fifo_size = (i != 0) ? 2048 : 512; 2171 2172 ep_reset_338x(dev->regs, ep); 2173 } 2174 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); 2175 2176 dev->gadget.ep0 = &dev->ep[0].ep; 2177 dev->ep[0].stopped = 0; 2178 2179 /* Link layer set up */ 2180 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 2181 (0xf << DEFECT7374_FSM_FIELD); 2182 2183 /* See if driver needs to set up for workaround: */ 2184 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) 2185 ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", 2186 __func__, fsmvalue); 2187 else { 2188 tmp = readl(&dev->usb_ext->usbctl2) & 2189 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE)); 2190 writel(tmp, &dev->usb_ext->usbctl2); 2191 } 2192 2193 /* Hardware Defect and Workaround */ 2194 val = readl(&dev->ll_lfps_regs->ll_lfps_5); 2195 val &= ~(0xf << TIMER_LFPS_6US); 2196 val |= 0x5 << TIMER_LFPS_6US; 2197 writel(val, &dev->ll_lfps_regs->ll_lfps_5); 2198 2199 val = readl(&dev->ll_lfps_regs->ll_lfps_6); 2200 val &= ~(0xffff << TIMER_LFPS_80US); 2201 val |= 0x0100 << TIMER_LFPS_80US; 2202 writel(val, &dev->ll_lfps_regs->ll_lfps_6); 2203 2204 /* 2205 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB 2206 * Hot Reset Exit Handshake may Fail in Specific Case using 2207 * Default Register Settings. Workaround for Enumeration test. 2208 */ 2209 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); 2210 val &= ~(0x1f << HOT_TX_NORESET_TS2); 2211 val |= 0x10 << HOT_TX_NORESET_TS2; 2212 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); 2213 2214 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); 2215 val &= ~(0x1f << HOT_RX_RESET_TS2); 2216 val |= 0x3 << HOT_RX_RESET_TS2; 2217 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); 2218 2219 /* 2220 * Set Recovery Idle to Recover bit: 2221 * - On SS connections, setting Recovery Idle to Recover Fmw improves 2222 * link robustness with various hosts and hubs. 2223 * - It is safe to set for all connection speeds; all chip revisions. 2224 * - R-M-W to leave other bits undisturbed. 2225 * - Reference PLX TT-7372 2226 */ 2227 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); 2228 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW); 2229 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); 2230 2231 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2232 2233 /* disable dedicated endpoints */ 2234 writel(0x0D, &dev->dep[0].dep_cfg); 2235 writel(0x0D, &dev->dep[1].dep_cfg); 2236 writel(0x0E, &dev->dep[2].dep_cfg); 2237 writel(0x0E, &dev->dep[3].dep_cfg); 2238 writel(0x0F, &dev->dep[4].dep_cfg); 2239 writel(0x0C, &dev->dep[5].dep_cfg); 2240 } 2241 2242 static void usb_reinit(struct net2280 *dev) 2243 { 2244 if (dev->quirks & PLX_LEGACY) 2245 return usb_reinit_228x(dev); 2246 return usb_reinit_338x(dev); 2247 } 2248 2249 static void ep0_start_228x(struct net2280 *dev) 2250 { 2251 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 2252 BIT(CLEAR_NAK_OUT_PACKETS) | 2253 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), 2254 &dev->epregs[0].ep_rsp); 2255 2256 /* 2257 * hardware optionally handles a bunch of standard requests 2258 * that the API hides from drivers anyway. have it do so. 2259 * endpoint status/features are handled in software, to 2260 * help pass tests for some dubious behavior. 
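	 *
	 * (concretely: GET_STATUS for the device and interfaces is
	 * auto-answered via the stdrsp bits below, while GET_STATUS for
	 * an endpoint is answered in software from the endpoint's ep_rsp
	 * halt bit in handle_stat0_irqs(), so the reported halt state
	 * always matches what software last set.)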
2261 */ 2262 writel(BIT(SET_TEST_MODE) | 2263 BIT(SET_ADDRESS) | 2264 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) | 2265 BIT(GET_DEVICE_STATUS) | 2266 BIT(GET_INTERFACE_STATUS), 2267 &dev->usb->stdrsp); 2268 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2269 BIT(SELF_POWERED_USB_DEVICE) | 2270 BIT(REMOTE_WAKEUP_SUPPORT) | 2271 (dev->softconnect << USB_DETECT_ENABLE) | 2272 BIT(SELF_POWERED_STATUS), 2273 &dev->usb->usbctl); 2274 2275 /* enable irqs so we can see ep0 and general operation */ 2276 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2277 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2278 &dev->regs->pciirqenb0); 2279 writel(BIT(PCI_INTERRUPT_ENABLE) | 2280 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2281 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2282 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) | 2283 BIT(VBUS_INTERRUPT_ENABLE) | 2284 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2285 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE), 2286 &dev->regs->pciirqenb1); 2287 2288 /* don't leave any writes posted */ 2289 (void) readl(&dev->usb->usbctl); 2290 } 2291 2292 static void ep0_start_338x(struct net2280 *dev) 2293 { 2294 u32 fsmvalue; 2295 2296 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 2297 (0xf << DEFECT7374_FSM_FIELD); 2298 2299 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) 2300 ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", __func__, 2301 fsmvalue); 2302 else 2303 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) | 2304 BIT(SET_EP_HIDE_STATUS_PHASE), 2305 &dev->epregs[0].ep_rsp); 2306 2307 /* 2308 * hardware optionally handles a bunch of standard requests 2309 * that the API hides from drivers anyway. have it do so. 2310 * endpoint status/features are handled in software, to 2311 * help pass tests for some dubious behavior. 2312 */ 2313 writel(BIT(SET_ISOCHRONOUS_DELAY) | 2314 BIT(SET_SEL) | 2315 BIT(SET_TEST_MODE) | 2316 BIT(SET_ADDRESS) | 2317 BIT(GET_INTERFACE_STATUS) | 2318 BIT(GET_DEVICE_STATUS), 2319 &dev->usb->stdrsp); 2320 dev->wakeup_enable = 1; 2321 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2322 (dev->softconnect << USB_DETECT_ENABLE) | 2323 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2324 &dev->usb->usbctl); 2325 2326 /* enable irqs so we can see ep0 and general operation */ 2327 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2328 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2329 &dev->regs->pciirqenb0); 2330 writel(BIT(PCI_INTERRUPT_ENABLE) | 2331 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2332 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) | 2333 BIT(VBUS_INTERRUPT_ENABLE), 2334 &dev->regs->pciirqenb1); 2335 2336 /* don't leave any writes posted */ 2337 (void)readl(&dev->usb->usbctl); 2338 } 2339 2340 static void ep0_start(struct net2280 *dev) 2341 { 2342 if (dev->quirks & PLX_LEGACY) 2343 return ep0_start_228x(dev); 2344 return ep0_start_338x(dev); 2345 } 2346 2347 /* when a driver is successfully registered, it will receive 2348 * control requests including set_configuration(), which enables 2349 * non-control requests. then usb traffic follows until a 2350 * disconnect is reported. then a host may connect again, or 2351 * the driver might get unbound. 
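 *
 * A rough sketch of the function-driver side (all names here are
 * hypothetical; the gadget core calls net2280_start() on our behalf
 * during the probe below):
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function	= "my-function",
 *		.max_speed	= USB_SPEED_HIGH,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *	};
 *
 *	status = usb_gadget_probe_driver(&my_driver);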
2352 */ 2353 static int net2280_start(struct usb_gadget *_gadget, 2354 struct usb_gadget_driver *driver) 2355 { 2356 struct net2280 *dev; 2357 int retval; 2358 unsigned i; 2359 2360 /* insist on high speed support from the driver, since 2361 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) 2362 * "must not be used in normal operation" 2363 */ 2364 if (!driver || driver->max_speed < USB_SPEED_HIGH || 2365 !driver->setup) 2366 return -EINVAL; 2367 2368 dev = container_of(_gadget, struct net2280, gadget); 2369 2370 for (i = 0; i < dev->n_ep; i++) 2371 dev->ep[i].irqs = 0; 2372 2373 /* hook up the driver ... */ 2374 dev->softconnect = 1; 2375 driver->driver.bus = NULL; 2376 dev->driver = driver; 2377 2378 retval = device_create_file(&dev->pdev->dev, &dev_attr_function); 2379 if (retval) 2380 goto err_unbind; 2381 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues); 2382 if (retval) 2383 goto err_func; 2384 2385 /* Enable force-full-speed testing mode, if desired */ 2386 if (full_speed && (dev->quirks & PLX_LEGACY)) 2387 writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag); 2388 2389 /* ... then enable host detection and ep0; and we're ready 2390 * for set_configuration as well as eventual disconnect. 2391 */ 2392 net2280_led_active(dev, 1); 2393 2394 if (dev->quirks & PLX_SUPERSPEED) 2395 defect7374_enable_data_eps_zero(dev); 2396 2397 ep0_start(dev); 2398 2399 /* pci writes may still be posted */ 2400 return 0; 2401 2402 err_func: 2403 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2404 err_unbind: 2405 dev->driver = NULL; 2406 return retval; 2407 } 2408 2409 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) 2410 { 2411 int i; 2412 2413 /* don't disconnect if it's not connected */ 2414 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 2415 driver = NULL; 2416 2417 /* stop hardware; prevent new request submissions; 2418 * and kill any outstanding requests. 2419 */ 2420 usb_reset(dev); 2421 for (i = 0; i < dev->n_ep; i++) 2422 nuke(&dev->ep[i]); 2423 2424 /* report disconnect; the driver is already quiesced */ 2425 if (driver) { 2426 spin_unlock(&dev->lock); 2427 driver->disconnect(&dev->gadget); 2428 spin_lock(&dev->lock); 2429 } 2430 2431 usb_reinit(dev); 2432 } 2433 2434 static int net2280_stop(struct usb_gadget *_gadget) 2435 { 2436 struct net2280 *dev; 2437 unsigned long flags; 2438 2439 dev = container_of(_gadget, struct net2280, gadget); 2440 2441 spin_lock_irqsave(&dev->lock, flags); 2442 stop_activity(dev, NULL); 2443 spin_unlock_irqrestore(&dev->lock, flags); 2444 2445 net2280_led_active(dev, 0); 2446 2447 /* Disable full-speed test mode */ 2448 if (dev->quirks & PLX_LEGACY) 2449 writel(0, &dev->usb->xcvrdiag); 2450 2451 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2452 device_remove_file(&dev->pdev->dev, &dev_attr_queues); 2453 2454 dev->driver = NULL; 2455 2456 return 0; 2457 } 2458 2459 /*-------------------------------------------------------------------------*/ 2460 2461 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq. 2462 * also works for dma-capable endpoints, in pio mode or just 2463 * to manually advance the queue after short OUT transfers. 
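 *
 * Worked example (a sketch, not tied to any particular gadget): a
 * 256-byte OUT request on ep-e (64-byte fifo, PIO) completes after
 * four DATA_PACKET_RECEIVED irqs, one read_fifo() per packet; if the
 * host sends a 10-byte short packet first, read_fifo() reports the
 * request done early and it completes with req->actual == 10.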
2464  */
2465 static void handle_ep_small(struct net2280_ep *ep)
2466 {
2467 	struct net2280_request *req;
2468 	u32 t;
2469 	/* 0 error, 1 mid-data, 2 done */
2470 	int mode = 1;
2471
2472 	if (!list_empty(&ep->queue))
2473 		req = list_entry(ep->queue.next,
2474 				struct net2280_request, queue);
2475 	else
2476 		req = NULL;
2477
2478 	/* ack all, and handle what we care about */
2479 	t = readl(&ep->regs->ep_stat);
2480 	ep->irqs++;
2481 #if 0
2482 	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
2483 		ep->ep.name, t, req ? &req->req : NULL);
2484 #endif
2485 	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
2486 		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
2487 	else
2488 		/* Added for 2282 */
2489 		writel(t, &ep->regs->ep_stat);
2490
2491 	/* for ep0, monitor token irqs to catch data stage length errors
2492 	 * and to synchronize on status.
2493 	 *
2494 	 * also, to defer reporting of protocol stalls ... here's where
2495 	 * data or status first appears, handling stalls here should never
2496 	 * cause trouble on the host side.
2497 	 *
2498 	 * control requests could be slightly faster without token synch for
2499 	 * status, but status can jam up that way.
2500 	 */
2501 	if (unlikely(ep->num == 0)) {
2502 		if (ep->is_in) {
2503 			/* status; stop NAKing */
2504 			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
2505 				if (ep->dev->protocol_stall) {
2506 					ep->stopped = 1;
2507 					set_halt(ep);
2508 				}
2509 				if (!req)
2510 					allow_status(ep);
2511 				mode = 2;
2512 			/* reply to extra IN data tokens with a zlp */
2513 			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2514 				if (ep->dev->protocol_stall) {
2515 					ep->stopped = 1;
2516 					set_halt(ep);
2517 					mode = 2;
2518 				} else if (ep->responded &&
2519 						!req && !ep->stopped)
2520 					write_fifo(ep, NULL);
2521 			}
2522 		} else {
2523 			/* status; stop NAKing */
2524 			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2525 				if (ep->dev->protocol_stall) {
2526 					ep->stopped = 1;
2527 					set_halt(ep);
2528 				}
2529 				mode = 2;
2530 			/* an extra OUT token is an error */
2531 			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2532 					req &&
2533 					req->req.actual == req->req.length) ||
2534 					(ep->responded && !req)) {
2535 				ep->dev->protocol_stall = 1;
2536 				set_halt(ep);
2537 				ep->stopped = 1;
2538 				if (req)
2539 					done(ep, req, -EOVERFLOW);
2540 				req = NULL;
2541 			}
2542 		}
2543 	}
2544
2545 	if (unlikely(!req))
2546 		return;
2547
2548 	/* manual DMA queue advance after short OUT */
2549 	if (likely(ep->dma)) {
2550 		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2551 			u32 count;
2552 			int stopped = ep->stopped;
2553
2554 			/* TRANSFERRED works around OUT_DONE erratum 0112.
2555 			 * we expect (N <= maxpacket) bytes; host wrote M.
2556 			 * iff (M < N) we won't ever see a DMA interrupt.
2557 			 */
2558 			ep->stopped = 1;
2559 			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
2560
2561 				/* any preceding dma transfers must finish.
2562 * dma handles (M >= N), may empty the queue 2563 */ 2564 scan_dma_completions(ep); 2565 if (unlikely(list_empty(&ep->queue) || 2566 ep->out_overflow)) { 2567 req = NULL; 2568 break; 2569 } 2570 req = list_entry(ep->queue.next, 2571 struct net2280_request, queue); 2572 2573 /* here either (M < N), a "real" short rx; 2574 * or (M == N) and the queue didn't empty 2575 */ 2576 if (likely(t & BIT(FIFO_EMPTY))) { 2577 count = readl(&ep->dma->dmacount); 2578 count &= DMA_BYTE_COUNT_MASK; 2579 if (readl(&ep->dma->dmadesc) 2580 != req->td_dma) 2581 req = NULL; 2582 break; 2583 } 2584 udelay(1); 2585 } 2586 2587 /* stop DMA, leave ep NAKing */ 2588 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 2589 spin_stop_dma(ep->dma); 2590 2591 if (likely(req)) { 2592 req->td->dmacount = 0; 2593 t = readl(&ep->regs->ep_avail); 2594 dma_done(ep, req, count, 2595 (ep->out_overflow || t) 2596 ? -EOVERFLOW : 0); 2597 } 2598 2599 /* also flush to prevent erratum 0106 trouble */ 2600 if (unlikely(ep->out_overflow || 2601 (ep->dev->chiprev == 0x0100 && 2602 ep->dev->gadget.speed 2603 == USB_SPEED_FULL))) { 2604 out_flush(ep); 2605 ep->out_overflow = 0; 2606 } 2607 2608 /* (re)start dma if needed, stop NAKing */ 2609 ep->stopped = stopped; 2610 if (!list_empty(&ep->queue)) 2611 restart_dma(ep); 2612 } else 2613 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n", 2614 ep->ep.name, t); 2615 return; 2616 2617 /* data packet(s) received (in the fifo, OUT) */ 2618 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) { 2619 if (read_fifo(ep, req) && ep->num != 0) 2620 mode = 2; 2621 2622 /* data packet(s) transmitted (IN) */ 2623 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) { 2624 unsigned len; 2625 2626 len = req->req.length - req->req.actual; 2627 if (len > ep->ep.maxpacket) 2628 len = ep->ep.maxpacket; 2629 req->req.actual += len; 2630 2631 /* if we wrote it all, we're usually done */ 2632 /* send zlps until the status stage */ 2633 if ((req->req.actual == req->req.length) && 2634 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) 2635 mode = 2; 2636 2637 /* there was nothing to do ... */ 2638 } else if (mode == 1) 2639 return; 2640 2641 /* done */ 2642 if (mode == 2) { 2643 /* stream endpoints often resubmit/unlink in completion */ 2644 done(ep, req, 0); 2645 2646 /* maybe advance queue to next request */ 2647 if (ep->num == 0) { 2648 /* NOTE: net2280 could let gadget driver start the 2649 * status stage later. since not all controllers let 2650 * them control that, the api doesn't (yet) allow it. 2651 */ 2652 if (!ep->stopped) 2653 allow_status(ep); 2654 req = NULL; 2655 } else { 2656 if (!list_empty(&ep->queue) && !ep->stopped) 2657 req = list_entry(ep->queue.next, 2658 struct net2280_request, queue); 2659 else 2660 req = NULL; 2661 if (req && !ep->is_in) 2662 stop_out_naking(ep); 2663 } 2664 } 2665 2666 /* is there a buffer for the next packet? 2667 * for best streaming performance, make sure there is one. 
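	 *
	 * from the gadget driver's side that means keeping at least two
	 * requests queued per endpoint, e.g. (sketch; buf[], BUFLEN and
	 * my_complete are illustrative, error handling omitted):
	 *
	 *	for (i = 0; i < 2; i++) {
	 *		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	 *		req->buf = buf[i];
	 *		req->length = BUFLEN;
	 *		req->complete = my_complete;
	 *		usb_ep_queue(ep, req, GFP_ATOMIC);
	 *	}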
2668  */
2669 	if (req && !ep->stopped) {
2670
2671 		/* load IN fifo with next packet (may be zlp) */
2672 		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
2673 			write_fifo(ep, &req->req);
2674 	}
2675 }
2676
2677 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
2678 {
2679 	struct net2280_ep *ep;
2680
2681 	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2682 		return &dev->ep[0];
2683 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2684 		u8 bEndpointAddress;
2685
2686 		if (!ep->desc)
2687 			continue;
2688 		bEndpointAddress = ep->desc->bEndpointAddress;
2689 		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2690 			continue;
2691 		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2692 			return ep;
2693 	}
2694 	return NULL;
2695 }
2696
2697 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2698 {
2699 	u32 scratch, fsmvalue;
2700 	u32 ack_wait_timeout, state;
2701
2702 	/* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2703 	scratch = get_idx_reg(dev->regs, SCRATCH);
2704 	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2705 	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2706
2707 	if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2708 	      (r.bRequestType & USB_DIR_IN)))
2709 		return;
2710
2711 	/* This is the first Control Read for this connection: */
2712 	if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
2713 		/*
2714 		 * Connection is NOT SS:
2715 		 * - Connection must be FS or HS.
2716 		 * - This FSM state should allow workaround software to
2717 		 *   run after the next USB connection.
2718 		 */
2719 		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
2720 		goto restore_data_eps;
2721 	}
2722
2723 	/* Connection is SS: */
2724 	for (ack_wait_timeout = 0;
2725 	     ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2726 	     ack_wait_timeout++) {
2727
2728 		state =	readl(&dev->plregs->pl_ep_status_1)
2729 			& (0xff << STATE);
2730 		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2731 		    (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2732 			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
2733 			break;
2734 		}
2735
2736 		/*
2737 		 * We have not yet received host's Data Phase ACK
2738 		 * - Wait and try again.
2739 		 */
2740 		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2743 	}
2744
2746 	if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
2747 		ep_err(dev, "FAIL: Defect 7374 workaround waited but failed to detect SS host's data phase ACK\n");
2749 		ep_err(dev, "PL_EP_STATUS_1(23:16): expected 0x11..0x16, got 0x%2.2x\n",
2750 		       state >> STATE);
2751 	} else {
2752 		ep_warn(dev, "INFO: Defect 7374 workaround waited about %d usec for Control Read Data Phase ACK\n",
2754 			DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2755 	}
2756
2757 restore_data_eps:
2758 	/*
2759 	 * Restore data EPs to their pre-workaround settings (disabled,
2760 	 * initialized, and other details).
2761 	 */
2762 	defect7374_disable_data_eps(dev);
2763
2764 	set_idx_reg(dev->regs, SCRATCH, scratch);
2767 }
2768
2769 static void ep_stall(struct net2280_ep *ep, int stall)
2770 {
2771 	struct net2280 *dev = ep->dev;
2772 	u32 val;
2773 	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2774
2775 	if (stall) {
2776 		writel(BIT(SET_ENDPOINT_HALT) |
2777 		       /* BIT(SET_NAK_PACKETS) | */
2778 		       BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2779 		       &ep->regs->ep_rsp);
2780 		ep->is_halt = 1;
2781 	} else {
2782 		if (dev->gadget.speed == USB_SPEED_SUPER) {
2783 			/*
2784 			 * Workaround for SS SeqNum not cleared via
2785 			 * Endpoint Halt (Clear) bit.
select endpoint 2786 */ 2787 val = readl(&dev->plregs->pl_ep_ctrl); 2788 val = (val & ~0x1f) | ep_pl[ep->num]; 2789 writel(val, &dev->plregs->pl_ep_ctrl); 2790 2791 val |= BIT(SEQUENCE_NUMBER_RESET); 2792 writel(val, &dev->plregs->pl_ep_ctrl); 2793 } 2794 val = readl(&ep->regs->ep_rsp); 2795 val |= BIT(CLEAR_ENDPOINT_HALT) | 2796 BIT(CLEAR_ENDPOINT_TOGGLE); 2797 writel(val, 2798 /* | BIT(CLEAR_NAK_PACKETS),*/ 2799 &ep->regs->ep_rsp); 2800 ep->is_halt = 0; 2801 val = readl(&ep->regs->ep_rsp); 2802 } 2803 } 2804 2805 static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged) 2806 { 2807 /* set/clear, then synch memory views with the device */ 2808 if (value) { 2809 ep->stopped = 1; 2810 if (ep->num == 0) 2811 ep->dev->protocol_stall = 1; 2812 else { 2813 if (ep->dma) 2814 ep_stop_dma(ep); 2815 ep_stall(ep, true); 2816 } 2817 2818 if (wedged) 2819 ep->wedged = 1; 2820 } else { 2821 ep->stopped = 0; 2822 ep->wedged = 0; 2823 2824 ep_stall(ep, false); 2825 2826 /* Flush the queue */ 2827 if (!list_empty(&ep->queue)) { 2828 struct net2280_request *req = 2829 list_entry(ep->queue.next, struct net2280_request, 2830 queue); 2831 if (ep->dma) 2832 resume_dma(ep); 2833 else { 2834 if (ep->is_in) 2835 write_fifo(ep, &req->req); 2836 else { 2837 if (read_fifo(ep, req)) 2838 done(ep, req, 0); 2839 } 2840 } 2841 } 2842 } 2843 } 2844 2845 static void handle_stat0_irqs_superspeed(struct net2280 *dev, 2846 struct net2280_ep *ep, struct usb_ctrlrequest r) 2847 { 2848 int tmp = 0; 2849 2850 #define w_value le16_to_cpu(r.wValue) 2851 #define w_index le16_to_cpu(r.wIndex) 2852 #define w_length le16_to_cpu(r.wLength) 2853 2854 switch (r.bRequest) { 2855 struct net2280_ep *e; 2856 u16 status; 2857 2858 case USB_REQ_SET_CONFIGURATION: 2859 dev->addressed_state = !w_value; 2860 goto usb3_delegate; 2861 2862 case USB_REQ_GET_STATUS: 2863 switch (r.bRequestType) { 2864 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2865 status = dev->wakeup_enable ? 
0x02 : 0x00; 2866 if (dev->selfpowered) 2867 status |= BIT(0); 2868 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | 2869 dev->ltm_enable << 4); 2870 writel(0, &dev->epregs[0].ep_irqenb); 2871 set_fifo_bytecount(ep, sizeof(status)); 2872 writel((__force u32) status, &dev->epregs[0].ep_data); 2873 allow_status_338x(ep); 2874 break; 2875 2876 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2877 e = get_ep_by_addr(dev, w_index); 2878 if (!e) 2879 goto do_stall3; 2880 status = readl(&e->regs->ep_rsp) & 2881 BIT(CLEAR_ENDPOINT_HALT); 2882 writel(0, &dev->epregs[0].ep_irqenb); 2883 set_fifo_bytecount(ep, sizeof(status)); 2884 writel((__force u32) status, &dev->epregs[0].ep_data); 2885 allow_status_338x(ep); 2886 break; 2887 2888 default: 2889 goto usb3_delegate; 2890 } 2891 break; 2892 2893 case USB_REQ_CLEAR_FEATURE: 2894 switch (r.bRequestType) { 2895 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2896 if (!dev->addressed_state) { 2897 switch (w_value) { 2898 case USB_DEVICE_U1_ENABLE: 2899 dev->u1_enable = 0; 2900 writel(readl(&dev->usb_ext->usbctl2) & 2901 ~BIT(U1_ENABLE), 2902 &dev->usb_ext->usbctl2); 2903 allow_status_338x(ep); 2904 goto next_endpoints3; 2905 2906 case USB_DEVICE_U2_ENABLE: 2907 dev->u2_enable = 0; 2908 writel(readl(&dev->usb_ext->usbctl2) & 2909 ~BIT(U2_ENABLE), 2910 &dev->usb_ext->usbctl2); 2911 allow_status_338x(ep); 2912 goto next_endpoints3; 2913 2914 case USB_DEVICE_LTM_ENABLE: 2915 dev->ltm_enable = 0; 2916 writel(readl(&dev->usb_ext->usbctl2) & 2917 ~BIT(LTM_ENABLE), 2918 &dev->usb_ext->usbctl2); 2919 allow_status_338x(ep); 2920 goto next_endpoints3; 2921 2922 default: 2923 break; 2924 } 2925 } 2926 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2927 dev->wakeup_enable = 0; 2928 writel(readl(&dev->usb->usbctl) & 2929 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2930 &dev->usb->usbctl); 2931 allow_status_338x(ep); 2932 break; 2933 } 2934 goto usb3_delegate; 2935 2936 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2937 e = get_ep_by_addr(dev, w_index); 2938 if (!e) 2939 goto do_stall3; 2940 if (w_value != USB_ENDPOINT_HALT) 2941 goto do_stall3; 2942 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 2943 ep_stall(e, false); 2944 if (!list_empty(&e->queue) && e->td_dma) 2945 restart_dma(e); 2946 allow_status(ep); 2947 ep->stopped = 1; 2948 break; 2949 2950 default: 2951 goto usb3_delegate; 2952 } 2953 break; 2954 case USB_REQ_SET_FEATURE: 2955 switch (r.bRequestType) { 2956 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2957 if (!dev->addressed_state) { 2958 switch (w_value) { 2959 case USB_DEVICE_U1_ENABLE: 2960 dev->u1_enable = 1; 2961 writel(readl(&dev->usb_ext->usbctl2) | 2962 BIT(U1_ENABLE), 2963 &dev->usb_ext->usbctl2); 2964 allow_status_338x(ep); 2965 goto next_endpoints3; 2966 2967 case USB_DEVICE_U2_ENABLE: 2968 dev->u2_enable = 1; 2969 writel(readl(&dev->usb_ext->usbctl2) | 2970 BIT(U2_ENABLE), 2971 &dev->usb_ext->usbctl2); 2972 allow_status_338x(ep); 2973 goto next_endpoints3; 2974 2975 case USB_DEVICE_LTM_ENABLE: 2976 dev->ltm_enable = 1; 2977 writel(readl(&dev->usb_ext->usbctl2) | 2978 BIT(LTM_ENABLE), 2979 &dev->usb_ext->usbctl2); 2980 allow_status_338x(ep); 2981 goto next_endpoints3; 2982 default: 2983 break; 2984 } 2985 } 2986 2987 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2988 dev->wakeup_enable = 1; 2989 writel(readl(&dev->usb->usbctl) | 2990 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2991 &dev->usb->usbctl); 2992 allow_status_338x(ep); 2993 break; 2994 } 2995 goto usb3_delegate; 2996 2997 case (USB_DIR_OUT | 
USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2998 e = get_ep_by_addr(dev, w_index); 2999 if (!e || (w_value != USB_ENDPOINT_HALT)) 3000 goto do_stall3; 3001 ep_stdrsp(e, true, false); 3002 allow_status_338x(ep); 3003 break; 3004 3005 default: 3006 goto usb3_delegate; 3007 } 3008 3009 break; 3010 default: 3011 3012 usb3_delegate: 3013 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n", 3014 r.bRequestType, r.bRequest, 3015 w_value, w_index, w_length, 3016 readl(&ep->cfg->ep_cfg)); 3017 3018 ep->responded = 0; 3019 spin_unlock(&dev->lock); 3020 tmp = dev->driver->setup(&dev->gadget, &r); 3021 spin_lock(&dev->lock); 3022 } 3023 do_stall3: 3024 if (tmp < 0) { 3025 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 3026 r.bRequestType, r.bRequest, tmp); 3027 dev->protocol_stall = 1; 3028 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */ 3029 ep_stall(ep, true); 3030 } 3031 3032 next_endpoints3: 3033 3034 #undef w_value 3035 #undef w_index 3036 #undef w_length 3037 3038 return; 3039 } 3040 3041 static void handle_stat0_irqs(struct net2280 *dev, u32 stat) 3042 { 3043 struct net2280_ep *ep; 3044 u32 num, scratch; 3045 3046 /* most of these don't need individual acks */ 3047 stat &= ~BIT(INTA_ASSERTED); 3048 if (!stat) 3049 return; 3050 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */ 3051 3052 /* starting a control request? */ 3053 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) { 3054 union { 3055 u32 raw[2]; 3056 struct usb_ctrlrequest r; 3057 } u; 3058 int tmp; 3059 struct net2280_request *req; 3060 3061 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { 3062 u32 val = readl(&dev->usb->usbstat); 3063 if (val & BIT(SUPER_SPEED)) { 3064 dev->gadget.speed = USB_SPEED_SUPER; 3065 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3066 EP0_SS_MAX_PACKET_SIZE); 3067 } else if (val & BIT(HIGH_SPEED)) { 3068 dev->gadget.speed = USB_SPEED_HIGH; 3069 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3070 EP0_HS_MAX_PACKET_SIZE); 3071 } else { 3072 dev->gadget.speed = USB_SPEED_FULL; 3073 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3074 EP0_HS_MAX_PACKET_SIZE); 3075 } 3076 net2280_led_speed(dev, dev->gadget.speed); 3077 ep_dbg(dev, "%s\n", 3078 usb_speed_string(dev->gadget.speed)); 3079 } 3080 3081 ep = &dev->ep[0]; 3082 ep->irqs++; 3083 3084 /* make sure any leftover request state is cleared */ 3085 stat &= ~BIT(ENDPOINT_0_INTERRUPT); 3086 while (!list_empty(&ep->queue)) { 3087 req = list_entry(ep->queue.next, 3088 struct net2280_request, queue); 3089 done(ep, req, (req->req.actual == req->req.length) 3090 ? 
0 : -EPROTO);
3091 		}
3092 		ep->stopped = 0;
3093 		dev->protocol_stall = 0;
3094 		if (dev->quirks & PLX_SUPERSPEED)
3095 			ep->is_halt = 0;
3096 		else {
3097 			if (ep->dev->quirks & PLX_2280)
3098 				tmp = BIT(FIFO_OVERFLOW) |
3099 				      BIT(FIFO_UNDERFLOW);
3100 			else
3101 				tmp = 0;
3102
3103 			writel(tmp | BIT(TIMEOUT) |
3104 			       BIT(USB_STALL_SENT) |
3105 			       BIT(USB_IN_NAK_SENT) |
3106 			       BIT(USB_IN_ACK_RCVD) |
3107 			       BIT(USB_OUT_PING_NAK_SENT) |
3108 			       BIT(USB_OUT_ACK_SENT) |
3109 			       BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
3110 			       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
3111 			       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3112 			       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3113 			       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3114 			       BIT(DATA_IN_TOKEN_INTERRUPT),
3115 			       &ep->regs->ep_stat);
3116 		}
3117 		u.raw[0] = readl(&dev->usb->setup0123);
3118 		u.raw[1] = readl(&dev->usb->setup4567);
3119
3120 		cpu_to_le32s(&u.raw[0]);
3121 		cpu_to_le32s(&u.raw[1]);
3122
3123 		if (dev->quirks & PLX_SUPERSPEED)
3124 			defect7374_workaround(dev, u.r);
3125
3126 		tmp = 0;
3127
3128 #define	w_value		le16_to_cpu(u.r.wValue)
3129 #define	w_index		le16_to_cpu(u.r.wIndex)
3130 #define	w_length	le16_to_cpu(u.r.wLength)
3131
3132 		/* ack the irq */
3133 		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
3134 		stat ^= BIT(SETUP_PACKET_INTERRUPT);
3135
3136 		/* watch control traffic at the token level, and force
3137 		 * synchronization before letting the status stage happen.
3138 		 * FIXME ignore tokens we'll NAK, until driver responds.
3139 		 * that'll mean a lot less irqs for some drivers.
3140 		 */
3141 		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
3142 		if (ep->is_in) {
3143 			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3144 				  BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3145 				  BIT(DATA_IN_TOKEN_INTERRUPT);
3146 			stop_out_naking(ep);
3147 		} else
3148 			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3149 				  BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3150 				  BIT(DATA_IN_TOKEN_INTERRUPT);
3151 		writel(scratch, &dev->epregs[0].ep_irqenb);
3152
3153 		/* we made the hardware handle most lowlevel requests;
3154 		 * everything else goes uplevel to the gadget code.
3155 		 */
3156 		ep->responded = 1;
3157
3158 		if (dev->gadget.speed == USB_SPEED_SUPER) {
3159 			handle_stat0_irqs_superspeed(dev, ep, u.r);
3160 			goto next_endpoints;
3161 		}
3162
3163 		switch (u.r.bRequest) {
3164 		case USB_REQ_GET_STATUS: {
3165 			struct net2280_ep *e;
3166 			__le32 status;
3167
3168 			/* hw handles device and interface status */
3169 			if (u.r.bRequestType != (USB_DIR_IN | USB_RECIP_ENDPOINT))
3170 				goto delegate;
3171 			e = get_ep_by_addr(dev, w_index);
3172 			if (!e || w_length > 2)
3173 				goto do_stall;
3174
3175 			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
3176 				status = cpu_to_le32(1);
3177 			else
3178 				status = cpu_to_le32(0);
3179
3180 			/* don't bother with a request object!
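			 * the status word goes straight into the ep0 fifo
			 * below: program the byte count, write the
			 * little-endian value to ep_data, and let
			 * allow_status() prime the status stage.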
*/ 3181 writel(0, &dev->epregs[0].ep_irqenb); 3182 set_fifo_bytecount(ep, w_length); 3183 writel((__force u32)status, &dev->epregs[0].ep_data); 3184 allow_status(ep); 3185 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status); 3186 goto next_endpoints; 3187 } 3188 break; 3189 case USB_REQ_CLEAR_FEATURE: { 3190 struct net2280_ep *e; 3191 3192 /* hw handles device features */ 3193 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3194 goto delegate; 3195 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3196 goto do_stall; 3197 e = get_ep_by_addr(dev, w_index); 3198 if (!e) 3199 goto do_stall; 3200 if (e->wedged) { 3201 ep_vdbg(dev, "%s wedged, halt not cleared\n", 3202 ep->ep.name); 3203 } else { 3204 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 3205 clear_halt(e); 3206 if ((ep->dev->quirks & PLX_SUPERSPEED) && 3207 !list_empty(&e->queue) && e->td_dma) 3208 restart_dma(e); 3209 } 3210 allow_status(ep); 3211 goto next_endpoints; 3212 } 3213 break; 3214 case USB_REQ_SET_FEATURE: { 3215 struct net2280_ep *e; 3216 3217 /* hw handles device features */ 3218 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3219 goto delegate; 3220 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3221 goto do_stall; 3222 e = get_ep_by_addr(dev, w_index); 3223 if (!e) 3224 goto do_stall; 3225 if (e->ep.name == ep0name) 3226 goto do_stall; 3227 set_halt(e); 3228 if ((dev->quirks & PLX_SUPERSPEED) && e->dma) 3229 abort_dma(e); 3230 allow_status(ep); 3231 ep_vdbg(dev, "%s set halt\n", ep->ep.name); 3232 goto next_endpoints; 3233 } 3234 break; 3235 default: 3236 delegate: 3237 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x " 3238 "ep_cfg %08x\n", 3239 u.r.bRequestType, u.r.bRequest, 3240 w_value, w_index, w_length, 3241 readl(&ep->cfg->ep_cfg)); 3242 ep->responded = 0; 3243 spin_unlock(&dev->lock); 3244 tmp = dev->driver->setup(&dev->gadget, &u.r); 3245 spin_lock(&dev->lock); 3246 } 3247 3248 /* stall ep0 on error */ 3249 if (tmp < 0) { 3250 do_stall: 3251 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 3252 u.r.bRequestType, u.r.bRequest, tmp); 3253 dev->protocol_stall = 1; 3254 } 3255 3256 /* some in/out token irq should follow; maybe stall then. 3257 * driver must queue a request (even zlp) or halt ep0 3258 * before the host times out. 3259 */ 3260 } 3261 3262 #undef w_value 3263 #undef w_index 3264 #undef w_length 3265 3266 next_endpoints: 3267 /* endpoint data irq ? */ 3268 scratch = stat & 0x7f; 3269 stat &= ~0x7f; 3270 for (num = 0; scratch; num++) { 3271 u32 t; 3272 3273 /* do this endpoint's FIFO and queue need tending? */ 3274 t = BIT(num); 3275 if ((scratch & t) == 0) 3276 continue; 3277 scratch ^= t; 3278 3279 ep = &dev->ep[num]; 3280 handle_ep_small(ep); 3281 } 3282 3283 if (stat) 3284 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat); 3285 } 3286 3287 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \ 3288 BIT(DMA_C_INTERRUPT) | \ 3289 BIT(DMA_B_INTERRUPT) | \ 3290 BIT(DMA_A_INTERRUPT)) 3291 #define PCI_ERROR_INTERRUPTS ( \ 3292 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \ 3293 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \ 3294 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3295 3296 static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3297 { 3298 struct net2280_ep *ep; 3299 u32 tmp, num, mask, scratch; 3300 3301 /* after disconnect there's nothing else to do! */ 3302 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT); 3303 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED); 3304 3305 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. 
3306 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and 3307 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT 3308 * only indicates a change in the reset state). 3309 */ 3310 if (stat & tmp) { 3311 bool reset = false; 3312 bool disconnect = false; 3313 3314 /* 3315 * Ignore disconnects and resets if the speed hasn't been set. 3316 * VBUS can bounce and there's always an initial reset. 3317 */ 3318 writel(tmp, &dev->regs->irqstat1); 3319 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 3320 if ((stat & BIT(VBUS_INTERRUPT)) && 3321 (readl(&dev->usb->usbctl) & 3322 BIT(VBUS_PIN)) == 0) { 3323 disconnect = true; 3324 ep_dbg(dev, "disconnect %s\n", 3325 dev->driver->driver.name); 3326 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && 3327 (readl(&dev->usb->usbstat) & mask) 3328 == 0) { 3329 reset = true; 3330 ep_dbg(dev, "reset %s\n", 3331 dev->driver->driver.name); 3332 } 3333 3334 if (disconnect || reset) { 3335 stop_activity(dev, dev->driver); 3336 ep0_start(dev); 3337 spin_unlock(&dev->lock); 3338 if (reset) 3339 usb_gadget_udc_reset 3340 (&dev->gadget, dev->driver); 3341 else 3342 (dev->driver->disconnect) 3343 (&dev->gadget); 3344 spin_lock(&dev->lock); 3345 return; 3346 } 3347 } 3348 stat &= ~tmp; 3349 3350 /* vBUS can bounce ... one of many reasons to ignore the 3351 * notion of hotplug events on bus connect/disconnect! 3352 */ 3353 if (!stat) 3354 return; 3355 } 3356 3357 /* NOTE: chip stays in PCI D0 state for now, but it could 3358 * enter D1 to save more power 3359 */ 3360 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3361 if (stat & tmp) { 3362 writel(tmp, &dev->regs->irqstat1); 3363 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3364 if (dev->driver->suspend) 3365 dev->driver->suspend(&dev->gadget); 3366 if (!enable_suspend) 3367 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT); 3368 } else { 3369 if (dev->driver->resume) 3370 dev->driver->resume(&dev->gadget); 3371 /* at high speed, note erratum 0133 */ 3372 } 3373 stat &= ~tmp; 3374 } 3375 3376 /* clear any other status/irqs */ 3377 if (stat) 3378 writel(stat, &dev->regs->irqstat1); 3379 3380 /* some status we can just ignore */ 3381 if (dev->quirks & PLX_2280) 3382 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3383 BIT(SUSPEND_REQUEST_INTERRUPT) | 3384 BIT(RESUME_INTERRUPT) | 3385 BIT(SOF_INTERRUPT)); 3386 else 3387 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3388 BIT(RESUME_INTERRUPT) | 3389 BIT(SOF_DOWN_INTERRUPT) | 3390 BIT(SOF_INTERRUPT)); 3391 3392 if (!stat) 3393 return; 3394 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/ 3395 3396 /* DMA status, for ep-{a,b,c,d} */ 3397 scratch = stat & DMA_INTERRUPTS; 3398 stat &= ~DMA_INTERRUPTS; 3399 scratch >>= 9; 3400 for (num = 0; scratch; num++) { 3401 struct net2280_dma_regs __iomem *dma; 3402 3403 tmp = BIT(num); 3404 if ((tmp & scratch) == 0) 3405 continue; 3406 scratch ^= tmp; 3407 3408 ep = &dev->ep[num + 1]; 3409 dma = ep->dma; 3410 3411 if (!dma) 3412 continue; 3413 3414 /* clear ep's dma status */ 3415 tmp = readl(&dma->dmastat); 3416 writel(tmp, &dma->dmastat); 3417 3418 /* dma sync*/ 3419 if (dev->quirks & PLX_SUPERSPEED) { 3420 u32 r_dmacount = readl(&dma->dmacount); 3421 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && 3422 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) 3423 continue; 3424 } 3425 3426 /* chaining should stop on abort, short OUT from fifo, 3427 * or (stat0 codepath) short OUT transfer. 3428 */ 3429 if (!use_dma_chaining) { 3430 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) { 3431 ep_dbg(ep->dev, "%s no xact done? 
%08x\n", 3432 ep->ep.name, tmp); 3433 continue; 3434 } 3435 stop_dma(ep->dma); 3436 } 3437 3438 /* OUT transfers terminate when the data from the 3439 * host is in our memory. Process whatever's done. 3440 * On this path, we know transfer's last packet wasn't 3441 * less than req->length. NAK_OUT_PACKETS may be set, 3442 * or the FIFO may already be holding new packets. 3443 * 3444 * IN transfers can linger in the FIFO for a very 3445 * long time ... we ignore that for now, accounting 3446 * precisely (like PIO does) needs per-packet irqs 3447 */ 3448 scan_dma_completions(ep); 3449 3450 /* disable dma on inactive queues; else maybe restart */ 3451 if (list_empty(&ep->queue)) { 3452 if (use_dma_chaining) 3453 stop_dma(ep->dma); 3454 } else { 3455 tmp = readl(&dma->dmactl); 3456 if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0) 3457 restart_dma(ep); 3458 else if (ep->is_in && use_dma_chaining) { 3459 struct net2280_request *req; 3460 __le32 dmacount; 3461 3462 /* the descriptor at the head of the chain 3463 * may still have VALID_BIT clear; that's 3464 * used to trigger changing DMA_FIFO_VALIDATE 3465 * (affects automagic zlp writes). 3466 */ 3467 req = list_entry(ep->queue.next, 3468 struct net2280_request, queue); 3469 dmacount = req->td->dmacount; 3470 dmacount &= cpu_to_le32(BIT(VALID_BIT) | 3471 DMA_BYTE_COUNT_MASK); 3472 if (dmacount && (dmacount & valid_bit) == 0) 3473 restart_dma(ep); 3474 } 3475 } 3476 ep->irqs++; 3477 } 3478 3479 /* NOTE: there are other PCI errors we might usefully notice. 3480 * if they appear very often, here's where to try recovering. 3481 */ 3482 if (stat & PCI_ERROR_INTERRUPTS) { 3483 ep_err(dev, "pci dma error; stat %08x\n", stat); 3484 stat &= ~PCI_ERROR_INTERRUPTS; 3485 /* these are fatal errors, but "maybe" they won't 3486 * happen again ... 
3487 */ 3488 stop_activity(dev, dev->driver); 3489 ep0_start(dev); 3490 stat = 0; 3491 } 3492 3493 if (stat) 3494 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat); 3495 } 3496 3497 static irqreturn_t net2280_irq(int irq, void *_dev) 3498 { 3499 struct net2280 *dev = _dev; 3500 3501 /* shared interrupt, not ours */ 3502 if ((dev->quirks & PLX_LEGACY) && 3503 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED)))) 3504 return IRQ_NONE; 3505 3506 spin_lock(&dev->lock); 3507 3508 /* handle disconnect, dma, and more */ 3509 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1)); 3510 3511 /* control requests and PIO */ 3512 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0)); 3513 3514 if (dev->quirks & PLX_SUPERSPEED) { 3515 /* re-enable interrupt to trigger any possible new interrupt */ 3516 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); 3517 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); 3518 writel(pciirqenb1, &dev->regs->pciirqenb1); 3519 } 3520 3521 spin_unlock(&dev->lock); 3522 3523 return IRQ_HANDLED; 3524 } 3525 3526 /*-------------------------------------------------------------------------*/ 3527 3528 static void gadget_release(struct device *_dev) 3529 { 3530 struct net2280 *dev = dev_get_drvdata(_dev); 3531 3532 kfree(dev); 3533 } 3534 3535 /* tear down the binding between this driver and the pci device */ 3536 3537 static void net2280_remove(struct pci_dev *pdev) 3538 { 3539 struct net2280 *dev = pci_get_drvdata(pdev); 3540 3541 usb_del_gadget_udc(&dev->gadget); 3542 3543 BUG_ON(dev->driver); 3544 3545 /* then clean up the resources we allocated during probe() */ 3546 net2280_led_shutdown(dev); 3547 if (dev->requests) { 3548 int i; 3549 for (i = 1; i < 5; i++) { 3550 if (!dev->ep[i].dummy) 3551 continue; 3552 pci_pool_free(dev->requests, dev->ep[i].dummy, 3553 dev->ep[i].td_dma); 3554 } 3555 pci_pool_destroy(dev->requests); 3556 } 3557 if (dev->got_irq) 3558 free_irq(pdev->irq, dev); 3559 if (use_msi && dev->quirks & PLX_SUPERSPEED) 3560 pci_disable_msi(pdev); 3561 if (dev->regs) 3562 iounmap(dev->regs); 3563 if (dev->region) 3564 release_mem_region(pci_resource_start(pdev, 0), 3565 pci_resource_len(pdev, 0)); 3566 if (dev->enabled) 3567 pci_disable_device(pdev); 3568 device_remove_file(&pdev->dev, &dev_attr_registers); 3569 3570 ep_info(dev, "unbind\n"); 3571 } 3572 3573 /* wrap this driver around the specified device, but 3574 * don't respond over USB until a gadget driver binds to us. 3575 */ 3576 3577 static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3578 { 3579 struct net2280 *dev; 3580 unsigned long resource, len; 3581 void __iomem *base = NULL; 3582 int retval, i; 3583 3584 if (!use_dma) 3585 use_dma_chaining = 0; 3586 3587 /* alloc, and start init */ 3588 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3589 if (dev == NULL) { 3590 retval = -ENOMEM; 3591 goto done; 3592 } 3593 3594 pci_set_drvdata(pdev, dev); 3595 spin_lock_init(&dev->lock); 3596 dev->quirks = id->driver_data; 3597 dev->pdev = pdev; 3598 dev->gadget.ops = &net2280_ops; 3599 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ? 3600 USB_SPEED_SUPER : USB_SPEED_HIGH; 3601 3602 /* the "gadget" abstracts/virtualizes the controller */ 3603 dev->gadget.name = driver_name; 3604 3605 /* now all the pci goodies ... 
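	 * (in order: enable the PCI device, claim and map BAR 0, hook up
	 * the IRQ, carve out a DMA descriptor pool, and finally register
	 * the UDC; net2280_remove() unwinds these same steps.)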
	 */
3606 	if (pci_enable_device(pdev) < 0) {
3607 		retval = -ENODEV;
3608 		goto done;
3609 	}
3610 	dev->enabled = 1;
3611
3612 	/* BAR 0 holds all the registers
3613 	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3614 	 * BAR 2 is fifo memory; unused here
3615 	 */
3616 	resource = pci_resource_start(pdev, 0);
3617 	len = pci_resource_len(pdev, 0);
3618 	if (!request_mem_region(resource, len, driver_name)) {
3619 		ep_dbg(dev, "controller already in use\n");
3620 		retval = -EBUSY;
3621 		goto done;
3622 	}
3623 	dev->region = 1;
3624
3625 	/* FIXME provide firmware download interface to put
3626 	 * 8051 code into the chip, e.g. to turn on PCI PM.
3627 	 */
3628
3629 	base = ioremap_nocache(resource, len);
3630 	if (base == NULL) {
3631 		ep_dbg(dev, "can't map memory\n");
3632 		retval = -EFAULT;
3633 		goto done;
3634 	}
3635 	dev->regs = (struct net2280_regs __iomem *) base;
3636 	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3637 	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3638 	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3639 	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3640 	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3641
3642 	if (dev->quirks & PLX_SUPERSPEED) {
3643 		u32 fsmvalue;
3644 		u32 usbstat;
3645
3646 		dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3647 							(base + 0x00b4);
3648 		dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
3649 							(base + 0x0500);
3650 		dev->llregs = (struct usb338x_ll_regs __iomem *)
3651 							(base + 0x0700);
3652 		dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3653 							(base + 0x0748);
3654 		dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3655 							(base + 0x077c);
3656 		dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3657 							(base + 0x079c);
3658 		dev->plregs = (struct usb338x_pl_regs __iomem *)
3659 							(base + 0x0800);
3660 		usbstat = readl(&dev->usb->usbstat);
3661 		dev->enhanced_mode = !!(usbstat & BIT(11));
3662 		dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3663 		/* put into initial config, link up all endpoints */
3664 		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3665 					(0xf << DEFECT7374_FSM_FIELD);
3666 		/* See if firmware needs to set up for workaround: */
3667 		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ)
3668 			writel(0, &dev->usb->usbctl);
3669 	} else {
3670 		dev->enhanced_mode = 0;
3671 		dev->n_ep = 7;
3672 		/* put into initial config, link up all endpoints */
3673 		writel(0, &dev->usb->usbctl);
3674 	}
3675
3676 	usb_reset(dev);
3677 	usb_reinit(dev);
3678
3679 	/* irq setup after old hardware is cleaned up */
3680 	if (!pdev->irq) {
3681 		ep_err(dev, "No IRQ.
Check PCI setup!\n"); 3681 retval = -ENODEV; 3682 goto done; 3683 } 3684 3685 if (use_msi && (dev->quirks & PLX_SUPERSPEED)) 3686 if (pci_enable_msi(pdev)) 3687 ep_err(dev, "Failed to enable MSI mode\n"); 3688 3689 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED, 3690 driver_name, dev)) { 3691 ep_err(dev, "request interrupt %d failed\n", pdev->irq); 3692 retval = -EBUSY; 3693 goto done; 3694 } 3695 dev->got_irq = 1; 3696 3697 /* DMA setup */ 3698 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */ 3699 dev->requests = pci_pool_create("requests", pdev, 3700 sizeof(struct net2280_dma), 3701 0 /* no alignment requirements */, 3702 0 /* or page-crossing issues */); 3703 if (!dev->requests) { 3704 ep_dbg(dev, "can't get request pool\n"); 3705 retval = -ENOMEM; 3706 goto done; 3707 } 3708 for (i = 1; i < 5; i++) { 3709 struct net2280_dma *td; 3710 3711 td = pci_pool_alloc(dev->requests, GFP_KERNEL, 3712 &dev->ep[i].td_dma); 3713 if (!td) { 3714 ep_dbg(dev, "can't get dummy %d\n", i); 3715 retval = -ENOMEM; 3716 goto done; 3717 } 3718 td->dmacount = 0; /* not VALID */ 3719 td->dmadesc = td->dmaaddr; 3720 dev->ep[i].dummy = td; 3721 } 3722 3723 /* enable lower-overhead pci memory bursts during DMA */ 3724 if (dev->quirks & PLX_LEGACY) 3725 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) | 3726 /* 3727 * 256 write retries may not be enough... 3728 BIT(PCI_RETRY_ABORT_ENABLE) | 3729 */ 3730 BIT(DMA_READ_MULTIPLE_ENABLE) | 3731 BIT(DMA_READ_LINE_ENABLE), 3732 &dev->pci->pcimstctl); 3733 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */ 3734 pci_set_master(pdev); 3735 pci_try_set_mwi(pdev); 3736 3737 /* ... also flushes any posted pci writes */ 3738 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff; 3739 3740 /* done */ 3741 ep_info(dev, "%s\n", driver_desc); 3742 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n", 3743 pdev->irq, base, dev->chiprev); 3744 ep_info(dev, "version: " DRIVER_VERSION "; dma %s %s\n", 3745 use_dma ? (use_dma_chaining ? "chaining" : "enabled") 3746 : "disabled", 3747 dev->enhanced_mode ? "enhanced mode" : "legacy mode"); 3748 retval = device_create_file(&pdev->dev, &dev_attr_registers); 3749 if (retval) 3750 goto done; 3751 3752 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 3753 gadget_release); 3754 if (retval) 3755 goto done; 3756 return 0; 3757 3758 done: 3759 if (dev) 3760 net2280_remove(pdev); 3761 return retval; 3762 } 3763 3764 /* make sure the board is quiescent; otherwise it will continue 3765 * generating IRQs across the upcoming reboot. 
3766 */ 3767 3768 static void net2280_shutdown(struct pci_dev *pdev) 3769 { 3770 struct net2280 *dev = pci_get_drvdata(pdev); 3771 3772 /* disable IRQs */ 3773 writel(0, &dev->regs->pciirqenb0); 3774 writel(0, &dev->regs->pciirqenb1); 3775 3776 /* disable the pullup so the host will think we're gone */ 3777 writel(0, &dev->usb->usbctl); 3778 3779 /* Disable full-speed test mode */ 3780 if (dev->quirks & PLX_LEGACY) 3781 writel(0, &dev->usb->xcvrdiag); 3782 } 3783 3784 3785 /*-------------------------------------------------------------------------*/ 3786 3787 static const struct pci_device_id pci_ids[] = { { 3788 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3789 .class_mask = ~0, 3790 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3791 .device = 0x2280, 3792 .subvendor = PCI_ANY_ID, 3793 .subdevice = PCI_ANY_ID, 3794 .driver_data = PLX_LEGACY | PLX_2280, 3795 }, { 3796 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3797 .class_mask = ~0, 3798 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3799 .device = 0x2282, 3800 .subvendor = PCI_ANY_ID, 3801 .subdevice = PCI_ANY_ID, 3802 .driver_data = PLX_LEGACY, 3803 }, 3804 { 3805 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3806 .class_mask = ~0, 3807 .vendor = PCI_VENDOR_ID_PLX, 3808 .device = 0x3380, 3809 .subvendor = PCI_ANY_ID, 3810 .subdevice = PCI_ANY_ID, 3811 .driver_data = PLX_SUPERSPEED, 3812 }, 3813 { 3814 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3815 .class_mask = ~0, 3816 .vendor = PCI_VENDOR_ID_PLX, 3817 .device = 0x3382, 3818 .subvendor = PCI_ANY_ID, 3819 .subdevice = PCI_ANY_ID, 3820 .driver_data = PLX_SUPERSPEED, 3821 }, 3822 { /* end: all zeroes */ } 3823 }; 3824 MODULE_DEVICE_TABLE(pci, pci_ids); 3825 3826 /* pci driver glue; this is a "new style" PCI driver module */ 3827 static struct pci_driver net2280_pci_driver = { 3828 .name = (char *) driver_name, 3829 .id_table = pci_ids, 3830 3831 .probe = net2280_probe, 3832 .remove = net2280_remove, 3833 .shutdown = net2280_shutdown, 3834 3835 /* FIXME add power management support */ 3836 }; 3837 3838 module_pci_driver(net2280_pci_driver); 3839 3840 MODULE_DESCRIPTION(DRIVER_DESC); 3841 MODULE_AUTHOR("David Brownell"); 3842 MODULE_LICENSE("GPL"); 3843
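
/*
 * Example bring-up (a sketch; the PCI bus address and the choice of
 * gadget driver are illustrative only):
 *
 *	# modprobe net2280
 *	# ls /sys/class/udc/
 *	0000:02:00.0
 *	# modprobe g_zero
 *
 * after which Gadget Zero binds to this UDC via net2280_start() and
 * enumeration can be exercised from the host side.
 */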