1 /* 2 * Driver for the PLX NET2280 USB device controller. 3 * Specs and errata are available from <http://www.plxtech.com>. 4 * 5 * PLX Technology Inc. (formerly NetChip Technology) supported the 6 * development of this driver. 7 * 8 * 9 * CODE STATUS HIGHLIGHTS 10 * 11 * This driver should work well with most "gadget" drivers, including 12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers 13 * as well as Gadget Zero and Gadgetfs. 14 * 15 * DMA is enabled by default. Drivers using transfer queues might use 16 * DMA chaining to remove IRQ latencies between transfers. (Except when 17 * short OUT transfers happen.) Drivers can use the req->no_interrupt 18 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed 19 * and DMA chaining is enabled. 20 * 21 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't 22 * be enabled. 23 * 24 * Note that almost all the errata workarounds here are only needed for 25 * rev1 chips. Rev1a silicon (0110) fixes almost all of them. 26 */ 27 28 /* 29 * Copyright (C) 2003 David Brownell 30 * Copyright (C) 2003-2005 PLX Technology, Inc. 31 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS 32 * 33 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility 34 * with 2282 chip 35 * 36 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility 37 * with usb 338x chip. Based on PLX driver 38 * 39 * This program is free software; you can redistribute it and/or modify 40 * it under the terms of the GNU General Public License as published by 41 * the Free Software Foundation; either version 2 of the License, or 42 * (at your option) any later version. 43 */ 44 45 #include <linux/module.h> 46 #include <linux/pci.h> 47 #include <linux/dma-mapping.h> 48 #include <linux/kernel.h> 49 #include <linux/delay.h> 50 #include <linux/ioport.h> 51 #include <linux/slab.h> 52 #include <linux/errno.h> 53 #include <linux/init.h> 54 #include <linux/timer.h> 55 #include <linux/list.h> 56 #include <linux/interrupt.h> 57 #include <linux/moduleparam.h> 58 #include <linux/device.h> 59 #include <linux/usb/ch9.h> 60 #include <linux/usb/gadget.h> 61 #include <linux/prefetch.h> 62 #include <linux/io.h> 63 64 #include <asm/byteorder.h> 65 #include <asm/irq.h> 66 #include <asm/unaligned.h> 67 68 #define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller" 69 #define DRIVER_VERSION "2005 Sept 27/v3.0" 70 71 #define EP_DONTUSE 13 /* nonzero */ 72 73 #define USE_RDK_LEDS /* GPIO pins control three LEDs */ 74 75 76 static const char driver_name[] = "net2280"; 77 static const char driver_desc[] = DRIVER_DESC; 78 79 static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 }; 80 static const char ep0name[] = "ep0"; 81 static const char *const ep_name[] = { 82 ep0name, 83 "ep-a", "ep-b", "ep-c", "ep-d", 84 "ep-e", "ep-f", "ep-g", "ep-h", 85 }; 86 87 /* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO) 88 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction 89 * 90 * The net2280 DMA engines are not tightly integrated with their FIFOs; 91 * not all cases are (yet) handled well in this driver or the silicon. 92 * Some gadget drivers work better with the dma support here than others. 93 * These two parameters let you use PIO or more aggressive DMA. 94 */ 95 static bool use_dma = true; 96 static bool use_dma_chaining; 97 static bool use_msi = true; 98 99 /* "modprobe net2280 use_dma=n" etc */ 100 module_param(use_dma, bool, 0444); 101 module_param(use_dma_chaining, bool, 0444); 102 module_param(use_msi, bool, 0444); 103 104 /* mode 0 == ep-{a,b,c,d} 1K fifo each 105 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable 106 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable 107 */ 108 static ushort fifo_mode; 109 110 /* "modprobe net2280 fifo_mode=1" etc */ 111 module_param(fifo_mode, ushort, 0644); 112 113 /* enable_suspend -- When enabled, the driver will respond to 114 * USB suspend requests by powering down the NET2280. Otherwise, 115 * USB suspend requests will be ignored. This is acceptable for 116 * self-powered devices 117 */ 118 static bool enable_suspend; 119 120 /* "modprobe net2280 enable_suspend=1" etc */ 121 module_param(enable_suspend, bool, 0444); 122 123 /* force full-speed operation */ 124 static bool full_speed; 125 module_param(full_speed, bool, 0444); 126 MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!"); 127 128 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out") 129 130 static char *type_string(u8 bmAttributes) 131 { 132 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 133 case USB_ENDPOINT_XFER_BULK: return "bulk"; 134 case USB_ENDPOINT_XFER_ISOC: return "iso"; 135 case USB_ENDPOINT_XFER_INT: return "intr"; 136 } 137 return "control"; 138 } 139 140 #include "net2280.h" 141 142 #define valid_bit cpu_to_le32(BIT(VALID_BIT)) 143 #define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE)) 144 145 /*-------------------------------------------------------------------------*/ 146 static inline void enable_pciirqenb(struct net2280_ep *ep) 147 { 148 u32 tmp = readl(&ep->dev->regs->pciirqenb0); 149 150 if (ep->dev->quirks & PLX_LEGACY) 151 tmp |= BIT(ep->num); 152 else 153 tmp |= BIT(ep_bit[ep->num]); 154 writel(tmp, &ep->dev->regs->pciirqenb0); 155 156 return; 157 } 158 159 static int 160 net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 161 { 162 struct net2280 *dev; 163 struct net2280_ep *ep; 164 u32 max, tmp; 165 unsigned long flags; 166 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 }; 167 168 ep = container_of(_ep, struct net2280_ep, ep); 169 if (!_ep || !desc || ep->desc || _ep->name == ep0name || 170 desc->bDescriptorType != USB_DT_ENDPOINT) 171 return -EINVAL; 172 dev = ep->dev; 173 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 174 return -ESHUTDOWN; 175 176 /* erratum 0119 workaround ties up an endpoint number */ 177 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) 178 return -EDOM; 179 180 if (dev->quirks & PLX_SUPERSPEED) { 181 if ((desc->bEndpointAddress & 0x0f) >= 0x0c) 182 return -EDOM; 183 ep->is_in = !!usb_endpoint_dir_in(desc); 184 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) 185 return -EINVAL; 186 } 187 188 /* sanity check ep-e/ep-f since their fifos are small */ 189 max = usb_endpoint_maxp(desc) & 0x1fff; 190 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) 191 return -ERANGE; 192 193 spin_lock_irqsave(&dev->lock, flags); 194 _ep->maxpacket = max & 0x7ff; 195 ep->desc = desc; 196 197 /* ep_reset() has already been called */ 198 ep->stopped = 0; 199 ep->wedged = 0; 200 ep->out_overflow = 0; 201 202 /* set speed-dependent max packet; may kick in high bandwidth */ 203 set_max_speed(ep, max); 204 205 /* FIFO lines can't go to different packets. PIO is ok, so 206 * use it instead of troublesome (non-bulk) multi-packet DMA. 207 */ 208 if (ep->dma && (max % 4) != 0 && use_dma_chaining) { 209 ep_dbg(ep->dev, "%s, no dma for maxpacket %d\n", 210 ep->ep.name, ep->ep.maxpacket); 211 ep->dma = NULL; 212 } 213 214 /* set type, direction, address; reset fifo counters */ 215 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 216 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); 217 if (tmp == USB_ENDPOINT_XFER_INT) { 218 /* erratum 0105 workaround prevents hs NYET */ 219 if (dev->chiprev == 0100 && 220 dev->gadget.speed == USB_SPEED_HIGH && 221 !(desc->bEndpointAddress & USB_DIR_IN)) 222 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE), 223 &ep->regs->ep_rsp); 224 } else if (tmp == USB_ENDPOINT_XFER_BULK) { 225 /* catch some particularly blatant driver bugs */ 226 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) || 227 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) || 228 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { 229 spin_unlock_irqrestore(&dev->lock, flags); 230 return -ERANGE; 231 } 232 } 233 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC); 234 /* Enable this endpoint */ 235 if (dev->quirks & PLX_LEGACY) { 236 tmp <<= ENDPOINT_TYPE; 237 tmp |= desc->bEndpointAddress; 238 /* default full fifo lines */ 239 tmp |= (4 << ENDPOINT_BYTE_COUNT); 240 tmp |= BIT(ENDPOINT_ENABLE); 241 ep->is_in = (tmp & USB_DIR_IN) != 0; 242 } else { 243 /* In Legacy mode, only OUT endpoints are used */ 244 if (dev->enhanced_mode && ep->is_in) { 245 tmp <<= IN_ENDPOINT_TYPE; 246 tmp |= BIT(IN_ENDPOINT_ENABLE); 247 /* Not applicable to Legacy */ 248 tmp |= BIT(ENDPOINT_DIRECTION); 249 } else { 250 tmp <<= OUT_ENDPOINT_TYPE; 251 tmp |= BIT(OUT_ENDPOINT_ENABLE); 252 tmp |= (ep->is_in << ENDPOINT_DIRECTION); 253 } 254 255 tmp |= usb_endpoint_num(desc); 256 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE); 257 } 258 259 /* Make sure all the registers are written before ep_rsp*/ 260 wmb(); 261 262 /* for OUT transfers, block the rx fifo until a read is posted */ 263 if (!ep->is_in) 264 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 265 else if (!(dev->quirks & PLX_2280)) { 266 /* Added for 2282, Don't use nak packets on an in endpoint, 267 * this was ignored on 2280 268 */ 269 writel(BIT(CLEAR_NAK_OUT_PACKETS) | 270 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); 271 } 272 273 writel(tmp, &ep->cfg->ep_cfg); 274 275 /* enable irqs */ 276 if (!ep->dma) { /* pio, per-packet */ 277 enable_pciirqenb(ep); 278 279 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) | 280 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE); 281 if (dev->quirks & PLX_2280) 282 tmp |= readl(&ep->regs->ep_irqenb); 283 writel(tmp, &ep->regs->ep_irqenb); 284 } else { /* dma, per-request */ 285 tmp = BIT((8 + ep->num)); /* completion */ 286 tmp |= readl(&dev->regs->pciirqenb1); 287 writel(tmp, &dev->regs->pciirqenb1); 288 289 /* for short OUT transfers, dma completions can't 290 * advance the queue; do it pio-style, by hand. 291 * NOTE erratum 0112 workaround #2 292 */ 293 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) { 294 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE); 295 writel(tmp, &ep->regs->ep_irqenb); 296 297 enable_pciirqenb(ep); 298 } 299 } 300 301 tmp = desc->bEndpointAddress; 302 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n", 303 _ep->name, tmp & 0x0f, DIR_STRING(tmp), 304 type_string(desc->bmAttributes), 305 ep->dma ? "dma" : "pio", max); 306 307 /* pci writes may still be posted */ 308 spin_unlock_irqrestore(&dev->lock, flags); 309 return 0; 310 } 311 312 static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec) 313 { 314 u32 result; 315 316 do { 317 result = readl(ptr); 318 if (result == ~(u32)0) /* "device unplugged" */ 319 return -ENODEV; 320 result &= mask; 321 if (result == done) 322 return 0; 323 udelay(1); 324 usec--; 325 } while (usec > 0); 326 return -ETIMEDOUT; 327 } 328 329 static const struct usb_ep_ops net2280_ep_ops; 330 331 static void ep_reset_228x(struct net2280_regs __iomem *regs, 332 struct net2280_ep *ep) 333 { 334 u32 tmp; 335 336 ep->desc = NULL; 337 INIT_LIST_HEAD(&ep->queue); 338 339 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 340 ep->ep.ops = &net2280_ep_ops; 341 342 /* disable the dma, irqs, endpoint... */ 343 if (ep->dma) { 344 writel(0, &ep->dma->dmactl); 345 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 346 BIT(DMA_TRANSACTION_DONE_INTERRUPT) | 347 BIT(DMA_ABORT), 348 &ep->dma->dmastat); 349 350 tmp = readl(®s->pciirqenb0); 351 tmp &= ~BIT(ep->num); 352 writel(tmp, ®s->pciirqenb0); 353 } else { 354 tmp = readl(®s->pciirqenb1); 355 tmp &= ~BIT((8 + ep->num)); /* completion */ 356 writel(tmp, ®s->pciirqenb1); 357 } 358 writel(0, &ep->regs->ep_irqenb); 359 360 /* init to our chosen defaults, notably so that we NAK OUT 361 * packets until the driver queues a read (+note erratum 0112) 362 */ 363 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) { 364 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) | 365 BIT(SET_NAK_OUT_PACKETS) | 366 BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 367 BIT(CLEAR_INTERRUPT_MODE); 368 } else { 369 /* added for 2282 */ 370 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) | 371 BIT(CLEAR_NAK_OUT_PACKETS) | 372 BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 373 BIT(CLEAR_INTERRUPT_MODE); 374 } 375 376 if (ep->num != 0) { 377 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) | 378 BIT(CLEAR_ENDPOINT_HALT); 379 } 380 writel(tmp, &ep->regs->ep_rsp); 381 382 /* scrub most status bits, and flush any fifo state */ 383 if (ep->dev->quirks & PLX_2280) 384 tmp = BIT(FIFO_OVERFLOW) | 385 BIT(FIFO_UNDERFLOW); 386 else 387 tmp = 0; 388 389 writel(tmp | BIT(TIMEOUT) | 390 BIT(USB_STALL_SENT) | 391 BIT(USB_IN_NAK_SENT) | 392 BIT(USB_IN_ACK_RCVD) | 393 BIT(USB_OUT_PING_NAK_SENT) | 394 BIT(USB_OUT_ACK_SENT) | 395 BIT(FIFO_FLUSH) | 396 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 397 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 398 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 399 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 400 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 401 BIT(DATA_IN_TOKEN_INTERRUPT), 402 &ep->regs->ep_stat); 403 404 /* fifo size is handled separately */ 405 } 406 407 static void ep_reset_338x(struct net2280_regs __iomem *regs, 408 struct net2280_ep *ep) 409 { 410 u32 tmp, dmastat; 411 412 ep->desc = NULL; 413 INIT_LIST_HEAD(&ep->queue); 414 415 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 416 ep->ep.ops = &net2280_ep_ops; 417 418 /* disable the dma, irqs, endpoint... */ 419 if (ep->dma) { 420 writel(0, &ep->dma->dmactl); 421 writel(BIT(DMA_ABORT_DONE_INTERRUPT) | 422 BIT(DMA_PAUSE_DONE_INTERRUPT) | 423 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 424 BIT(DMA_TRANSACTION_DONE_INTERRUPT), 425 /* | BIT(DMA_ABORT), */ 426 &ep->dma->dmastat); 427 428 dmastat = readl(&ep->dma->dmastat); 429 if (dmastat == 0x5002) { 430 ep_warn(ep->dev, "The dmastat return = %x!!\n", 431 dmastat); 432 writel(0x5a, &ep->dma->dmastat); 433 } 434 435 tmp = readl(®s->pciirqenb0); 436 tmp &= ~BIT(ep_bit[ep->num]); 437 writel(tmp, ®s->pciirqenb0); 438 } else { 439 if (ep->num < 5) { 440 tmp = readl(®s->pciirqenb1); 441 tmp &= ~BIT((8 + ep->num)); /* completion */ 442 writel(tmp, ®s->pciirqenb1); 443 } 444 } 445 writel(0, &ep->regs->ep_irqenb); 446 447 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 448 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 449 BIT(FIFO_OVERFLOW) | 450 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 451 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 452 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 453 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); 454 } 455 456 static void nuke(struct net2280_ep *); 457 458 static int net2280_disable(struct usb_ep *_ep) 459 { 460 struct net2280_ep *ep; 461 unsigned long flags; 462 463 ep = container_of(_ep, struct net2280_ep, ep); 464 if (!_ep || !ep->desc || _ep->name == ep0name) 465 return -EINVAL; 466 467 spin_lock_irqsave(&ep->dev->lock, flags); 468 nuke(ep); 469 470 if (ep->dev->quirks & PLX_SUPERSPEED) 471 ep_reset_338x(ep->dev->regs, ep); 472 else 473 ep_reset_228x(ep->dev->regs, ep); 474 475 ep_vdbg(ep->dev, "disabled %s %s\n", 476 ep->dma ? "dma" : "pio", _ep->name); 477 478 /* synch memory views with the device */ 479 (void)readl(&ep->cfg->ep_cfg); 480 481 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4) 482 ep->dma = &ep->dev->dma[ep->num - 1]; 483 484 spin_unlock_irqrestore(&ep->dev->lock, flags); 485 return 0; 486 } 487 488 /*-------------------------------------------------------------------------*/ 489 490 static struct usb_request 491 *net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 492 { 493 struct net2280_ep *ep; 494 struct net2280_request *req; 495 496 if (!_ep) 497 return NULL; 498 ep = container_of(_ep, struct net2280_ep, ep); 499 500 req = kzalloc(sizeof(*req), gfp_flags); 501 if (!req) 502 return NULL; 503 504 INIT_LIST_HEAD(&req->queue); 505 506 /* this dma descriptor may be swapped with the previous dummy */ 507 if (ep->dma) { 508 struct net2280_dma *td; 509 510 td = pci_pool_alloc(ep->dev->requests, gfp_flags, 511 &req->td_dma); 512 if (!td) { 513 kfree(req); 514 return NULL; 515 } 516 td->dmacount = 0; /* not VALID */ 517 td->dmadesc = td->dmaaddr; 518 req->td = td; 519 } 520 return &req->req; 521 } 522 523 static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req) 524 { 525 struct net2280_ep *ep; 526 struct net2280_request *req; 527 528 ep = container_of(_ep, struct net2280_ep, ep); 529 if (!_ep || !_req) 530 return; 531 532 req = container_of(_req, struct net2280_request, req); 533 WARN_ON(!list_empty(&req->queue)); 534 if (req->td) 535 pci_pool_free(ep->dev->requests, req->td, req->td_dma); 536 kfree(req); 537 } 538 539 /*-------------------------------------------------------------------------*/ 540 541 /* load a packet into the fifo we use for usb IN transfers. 542 * works for all endpoints. 543 * 544 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo 545 * at a time, but this code is simpler because it knows it only writes 546 * one packet. ep-a..ep-d should use dma instead. 547 */ 548 static void write_fifo(struct net2280_ep *ep, struct usb_request *req) 549 { 550 struct net2280_ep_regs __iomem *regs = ep->regs; 551 u8 *buf; 552 u32 tmp; 553 unsigned count, total; 554 555 /* INVARIANT: fifo is currently empty. (testable) */ 556 557 if (req) { 558 buf = req->buf + req->actual; 559 prefetch(buf); 560 total = req->length - req->actual; 561 } else { 562 total = 0; 563 buf = NULL; 564 } 565 566 /* write just one packet at a time */ 567 count = ep->ep.maxpacket; 568 if (count > total) /* min() cannot be used on a bitfield */ 569 count = total; 570 571 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", 572 ep->ep.name, count, 573 (count != ep->ep.maxpacket) ? " (short)" : "", 574 req); 575 while (count >= 4) { 576 /* NOTE be careful if you try to align these. fifo lines 577 * should normally be full (4 bytes) and successive partial 578 * lines are ok only in certain cases. 579 */ 580 tmp = get_unaligned((u32 *)buf); 581 cpu_to_le32s(&tmp); 582 writel(tmp, ®s->ep_data); 583 buf += 4; 584 count -= 4; 585 } 586 587 /* last fifo entry is "short" unless we wrote a full packet. 588 * also explicitly validate last word in (periodic) transfers 589 * when maxpacket is not a multiple of 4 bytes. 590 */ 591 if (count || total < ep->ep.maxpacket) { 592 tmp = count ? get_unaligned((u32 *)buf) : count; 593 cpu_to_le32s(&tmp); 594 set_fifo_bytecount(ep, count & 0x03); 595 writel(tmp, ®s->ep_data); 596 } 597 598 /* pci writes may still be posted */ 599 } 600 601 /* work around erratum 0106: PCI and USB race over the OUT fifo. 602 * caller guarantees chiprev 0100, out endpoint is NAKing, and 603 * there's no real data in the fifo. 604 * 605 * NOTE: also used in cases where that erratum doesn't apply: 606 * where the host wrote "too much" data to us. 607 */ 608 static void out_flush(struct net2280_ep *ep) 609 { 610 u32 __iomem *statp; 611 u32 tmp; 612 613 ASSERT_OUT_NAKING(ep); 614 615 statp = &ep->regs->ep_stat; 616 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 617 BIT(DATA_PACKET_RECEIVED_INTERRUPT), 618 statp); 619 writel(BIT(FIFO_FLUSH), statp); 620 /* Make sure that stap is written */ 621 mb(); 622 tmp = readl(statp); 623 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) && 624 /* high speed did bulk NYET; fifo isn't filling */ 625 ep->dev->gadget.speed == USB_SPEED_FULL) { 626 unsigned usec; 627 628 usec = 50; /* 64 byte bulk/interrupt */ 629 handshake(statp, BIT(USB_OUT_PING_NAK_SENT), 630 BIT(USB_OUT_PING_NAK_SENT), usec); 631 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */ 632 } 633 } 634 635 /* unload packet(s) from the fifo we use for usb OUT transfers. 636 * returns true iff the request completed, because of short packet 637 * or the request buffer having filled with full packets. 638 * 639 * for ep-a..ep-d this will read multiple packets out when they 640 * have been accepted. 641 */ 642 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req) 643 { 644 struct net2280_ep_regs __iomem *regs = ep->regs; 645 u8 *buf = req->req.buf + req->req.actual; 646 unsigned count, tmp, is_short; 647 unsigned cleanup = 0, prevent = 0; 648 649 /* erratum 0106 ... packets coming in during fifo reads might 650 * be incompletely rejected. not all cases have workarounds. 651 */ 652 if (ep->dev->chiprev == 0x0100 && 653 ep->dev->gadget.speed == USB_SPEED_FULL) { 654 udelay(1); 655 tmp = readl(&ep->regs->ep_stat); 656 if ((tmp & BIT(NAK_OUT_PACKETS))) 657 cleanup = 1; 658 else if ((tmp & BIT(FIFO_FULL))) { 659 start_out_naking(ep); 660 prevent = 1; 661 } 662 /* else: hope we don't see the problem */ 663 } 664 665 /* never overflow the rx buffer. the fifo reads packets until 666 * it sees a short one; we might not be ready for them all. 667 */ 668 prefetchw(buf); 669 count = readl(®s->ep_avail); 670 if (unlikely(count == 0)) { 671 udelay(1); 672 tmp = readl(&ep->regs->ep_stat); 673 count = readl(®s->ep_avail); 674 /* handled that data already? */ 675 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0) 676 return 0; 677 } 678 679 tmp = req->req.length - req->req.actual; 680 if (count > tmp) { 681 /* as with DMA, data overflow gets flushed */ 682 if ((tmp % ep->ep.maxpacket) != 0) { 683 ep_err(ep->dev, 684 "%s out fifo %d bytes, expected %d\n", 685 ep->ep.name, count, tmp); 686 req->req.status = -EOVERFLOW; 687 cleanup = 1; 688 /* NAK_OUT_PACKETS will be set, so flushing is safe; 689 * the next read will start with the next packet 690 */ 691 } /* else it's a ZLP, no worries */ 692 count = tmp; 693 } 694 req->req.actual += count; 695 696 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0); 697 698 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", 699 ep->ep.name, count, is_short ? " (short)" : "", 700 cleanup ? " flush" : "", prevent ? " nak" : "", 701 req, req->req.actual, req->req.length); 702 703 while (count >= 4) { 704 tmp = readl(®s->ep_data); 705 cpu_to_le32s(&tmp); 706 put_unaligned(tmp, (u32 *)buf); 707 buf += 4; 708 count -= 4; 709 } 710 if (count) { 711 tmp = readl(®s->ep_data); 712 /* LE conversion is implicit here: */ 713 do { 714 *buf++ = (u8) tmp; 715 tmp >>= 8; 716 } while (--count); 717 } 718 if (cleanup) 719 out_flush(ep); 720 if (prevent) { 721 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 722 (void) readl(&ep->regs->ep_rsp); 723 } 724 725 return is_short || ((req->req.actual == req->req.length) && 726 !req->req.zero); 727 } 728 729 /* fill out dma descriptor to match a given request */ 730 static void fill_dma_desc(struct net2280_ep *ep, 731 struct net2280_request *req, int valid) 732 { 733 struct net2280_dma *td = req->td; 734 u32 dmacount = req->req.length; 735 736 /* don't let DMA continue after a short OUT packet, 737 * so overruns can't affect the next transfer. 738 * in case of overruns on max-size packets, we can't 739 * stop the fifo from filling but we can flush it. 740 */ 741 if (ep->is_in) 742 dmacount |= BIT(DMA_DIRECTION); 743 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) || 744 !(ep->dev->quirks & PLX_2280)) 745 dmacount |= BIT(END_OF_CHAIN); 746 747 req->valid = valid; 748 if (valid) 749 dmacount |= BIT(VALID_BIT); 750 if (likely(!req->req.no_interrupt || !use_dma_chaining)) 751 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE); 752 753 /* td->dmadesc = previously set by caller */ 754 td->dmaaddr = cpu_to_le32 (req->req.dma); 755 756 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */ 757 wmb(); 758 td->dmacount = cpu_to_le32(dmacount); 759 } 760 761 static const u32 dmactl_default = 762 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 763 BIT(DMA_CLEAR_COUNT_ENABLE) | 764 /* erratum 0116 workaround part 1 (use POLLING) */ 765 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) | 766 BIT(DMA_VALID_BIT_POLLING_ENABLE) | 767 BIT(DMA_VALID_BIT_ENABLE) | 768 BIT(DMA_SCATTER_GATHER_ENABLE) | 769 /* erratum 0116 workaround part 2 (no AUTOSTART) */ 770 BIT(DMA_ENABLE); 771 772 static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma) 773 { 774 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50); 775 } 776 777 static inline void stop_dma(struct net2280_dma_regs __iomem *dma) 778 { 779 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl); 780 spin_stop_dma(dma); 781 } 782 783 static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) 784 { 785 struct net2280_dma_regs __iomem *dma = ep->dma; 786 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION); 787 788 if (!(ep->dev->quirks & PLX_2280)) 789 tmp |= BIT(END_OF_CHAIN); 790 791 writel(tmp, &dma->dmacount); 792 writel(readl(&dma->dmastat), &dma->dmastat); 793 794 writel(td_dma, &dma->dmadesc); 795 if (ep->dev->quirks & PLX_SUPERSPEED) 796 dmactl |= BIT(DMA_REQUEST_OUTSTANDING); 797 writel(dmactl, &dma->dmactl); 798 799 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */ 800 (void) readl(&ep->dev->pci->pcimstctl); 801 802 writel(BIT(DMA_START), &dma->dmastat); 803 804 if (!ep->is_in) 805 stop_out_naking(ep); 806 } 807 808 static void start_dma(struct net2280_ep *ep, struct net2280_request *req) 809 { 810 u32 tmp; 811 struct net2280_dma_regs __iomem *dma = ep->dma; 812 813 /* FIXME can't use DMA for ZLPs */ 814 815 /* on this path we "know" there's no dma active (yet) */ 816 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE)); 817 writel(0, &ep->dma->dmactl); 818 819 /* previous OUT packet might have been short */ 820 if (!ep->is_in && (readl(&ep->regs->ep_stat) & 821 BIT(NAK_OUT_PACKETS))) { 822 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT), 823 &ep->regs->ep_stat); 824 825 tmp = readl(&ep->regs->ep_avail); 826 if (tmp) { 827 writel(readl(&dma->dmastat), &dma->dmastat); 828 829 /* transfer all/some fifo data */ 830 writel(req->req.dma, &dma->dmaaddr); 831 tmp = min(tmp, req->req.length); 832 833 /* dma irq, faking scatterlist status */ 834 req->td->dmacount = cpu_to_le32(req->req.length - tmp); 835 writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp, 836 &dma->dmacount); 837 req->td->dmadesc = 0; 838 req->valid = 1; 839 840 writel(BIT(DMA_ENABLE), &dma->dmactl); 841 writel(BIT(DMA_START), &dma->dmastat); 842 return; 843 } 844 } 845 846 tmp = dmactl_default; 847 848 /* force packet boundaries between dma requests, but prevent the 849 * controller from automagically writing a last "short" packet 850 * (zero length) unless the driver explicitly said to do that. 851 */ 852 if (ep->is_in) { 853 if (likely((req->req.length % ep->ep.maxpacket) || 854 req->req.zero)){ 855 tmp |= BIT(DMA_FIFO_VALIDATE); 856 ep->in_fifo_validate = 1; 857 } else 858 ep->in_fifo_validate = 0; 859 } 860 861 /* init req->td, pointing to the current dummy */ 862 req->td->dmadesc = cpu_to_le32 (ep->td_dma); 863 fill_dma_desc(ep, req, 1); 864 865 if (!use_dma_chaining) 866 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); 867 868 start_queue(ep, tmp, req->td_dma); 869 } 870 871 static inline void resume_dma(struct net2280_ep *ep) 872 { 873 writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl); 874 875 ep->dma_started = true; 876 } 877 878 static inline void ep_stop_dma(struct net2280_ep *ep) 879 { 880 writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl); 881 spin_stop_dma(ep->dma); 882 883 ep->dma_started = false; 884 } 885 886 static inline void 887 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid) 888 { 889 struct net2280_dma *end; 890 dma_addr_t tmp; 891 892 /* swap new dummy for old, link; fill and maybe activate */ 893 end = ep->dummy; 894 ep->dummy = req->td; 895 req->td = end; 896 897 tmp = ep->td_dma; 898 ep->td_dma = req->td_dma; 899 req->td_dma = tmp; 900 901 end->dmadesc = cpu_to_le32 (ep->td_dma); 902 903 fill_dma_desc(ep, req, valid); 904 } 905 906 static void 907 done(struct net2280_ep *ep, struct net2280_request *req, int status) 908 { 909 struct net2280 *dev; 910 unsigned stopped = ep->stopped; 911 912 list_del_init(&req->queue); 913 914 if (req->req.status == -EINPROGRESS) 915 req->req.status = status; 916 else 917 status = req->req.status; 918 919 dev = ep->dev; 920 if (ep->dma) 921 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); 922 923 if (status && status != -ESHUTDOWN) 924 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n", 925 ep->ep.name, &req->req, status, 926 req->req.actual, req->req.length); 927 928 /* don't modify queue heads during completion callback */ 929 ep->stopped = 1; 930 spin_unlock(&dev->lock); 931 usb_gadget_giveback_request(&ep->ep, &req->req); 932 spin_lock(&dev->lock); 933 ep->stopped = stopped; 934 } 935 936 /*-------------------------------------------------------------------------*/ 937 938 static int 939 net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 940 { 941 struct net2280_request *req; 942 struct net2280_ep *ep; 943 struct net2280 *dev; 944 unsigned long flags; 945 946 /* we always require a cpu-view buffer, so that we can 947 * always use pio (as fallback or whatever). 948 */ 949 req = container_of(_req, struct net2280_request, req); 950 if (!_req || !_req->complete || !_req->buf || 951 !list_empty(&req->queue)) 952 return -EINVAL; 953 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) 954 return -EDOM; 955 ep = container_of(_ep, struct net2280_ep, ep); 956 if (!_ep || (!ep->desc && ep->num != 0)) 957 return -EINVAL; 958 dev = ep->dev; 959 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 960 return -ESHUTDOWN; 961 962 /* FIXME implement PIO fallback for ZLPs with DMA */ 963 if (ep->dma && _req->length == 0) 964 return -EOPNOTSUPP; 965 966 /* set up dma mapping in case the caller didn't */ 967 if (ep->dma) { 968 int ret; 969 970 ret = usb_gadget_map_request(&dev->gadget, _req, 971 ep->is_in); 972 if (ret) 973 return ret; 974 } 975 976 #if 0 977 ep_vdbg(dev, "%s queue req %p, len %d buf %p\n", 978 _ep->name, _req, _req->length, _req->buf); 979 #endif 980 981 spin_lock_irqsave(&dev->lock, flags); 982 983 _req->status = -EINPROGRESS; 984 _req->actual = 0; 985 986 /* kickstart this i/o queue? */ 987 if (list_empty(&ep->queue) && !ep->stopped) { 988 /* DMA request while EP halted */ 989 if (ep->dma && 990 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) && 991 (dev->quirks & PLX_SUPERSPEED)) { 992 int valid = 1; 993 if (ep->is_in) { 994 int expect; 995 expect = likely(req->req.zero || 996 ((req->req.length % 997 ep->ep.maxpacket) != 0)); 998 if (expect != ep->in_fifo_validate) 999 valid = 0; 1000 } 1001 queue_dma(ep, req, valid); 1002 } 1003 /* use DMA if the endpoint supports it, else pio */ 1004 else if (ep->dma) 1005 start_dma(ep, req); 1006 else { 1007 /* maybe there's no control data, just status ack */ 1008 if (ep->num == 0 && _req->length == 0) { 1009 allow_status(ep); 1010 done(ep, req, 0); 1011 ep_vdbg(dev, "%s status ack\n", ep->ep.name); 1012 goto done; 1013 } 1014 1015 /* PIO ... stuff the fifo, or unblock it. */ 1016 if (ep->is_in) 1017 write_fifo(ep, _req); 1018 else if (list_empty(&ep->queue)) { 1019 u32 s; 1020 1021 /* OUT FIFO might have packet(s) buffered */ 1022 s = readl(&ep->regs->ep_stat); 1023 if ((s & BIT(FIFO_EMPTY)) == 0) { 1024 /* note: _req->short_not_ok is 1025 * ignored here since PIO _always_ 1026 * stops queue advance here, and 1027 * _req->status doesn't change for 1028 * short reads (only _req->actual) 1029 */ 1030 if (read_fifo(ep, req) && 1031 ep->num == 0) { 1032 done(ep, req, 0); 1033 allow_status(ep); 1034 /* don't queue it */ 1035 req = NULL; 1036 } else if (read_fifo(ep, req) && 1037 ep->num != 0) { 1038 done(ep, req, 0); 1039 req = NULL; 1040 } else 1041 s = readl(&ep->regs->ep_stat); 1042 } 1043 1044 /* don't NAK, let the fifo fill */ 1045 if (req && (s & BIT(NAK_OUT_PACKETS))) 1046 writel(BIT(CLEAR_NAK_OUT_PACKETS), 1047 &ep->regs->ep_rsp); 1048 } 1049 } 1050 1051 } else if (ep->dma) { 1052 int valid = 1; 1053 1054 if (ep->is_in) { 1055 int expect; 1056 1057 /* preventing magic zlps is per-engine state, not 1058 * per-transfer; irq logic must recover hiccups. 1059 */ 1060 expect = likely(req->req.zero || 1061 (req->req.length % ep->ep.maxpacket)); 1062 if (expect != ep->in_fifo_validate) 1063 valid = 0; 1064 } 1065 queue_dma(ep, req, valid); 1066 1067 } /* else the irq handler advances the queue. */ 1068 1069 ep->responded = 1; 1070 if (req) 1071 list_add_tail(&req->queue, &ep->queue); 1072 done: 1073 spin_unlock_irqrestore(&dev->lock, flags); 1074 1075 /* pci writes may still be posted */ 1076 return 0; 1077 } 1078 1079 static inline void 1080 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount, 1081 int status) 1082 { 1083 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); 1084 done(ep, req, status); 1085 } 1086 1087 static void restart_dma(struct net2280_ep *ep); 1088 1089 static void scan_dma_completions(struct net2280_ep *ep) 1090 { 1091 /* only look at descriptors that were "naturally" retired, 1092 * so fifo and list head state won't matter 1093 */ 1094 while (!list_empty(&ep->queue)) { 1095 struct net2280_request *req; 1096 u32 tmp; 1097 1098 req = list_entry(ep->queue.next, 1099 struct net2280_request, queue); 1100 if (!req->valid) 1101 break; 1102 rmb(); 1103 tmp = le32_to_cpup(&req->td->dmacount); 1104 if ((tmp & BIT(VALID_BIT)) != 0) 1105 break; 1106 1107 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" 1108 * cases where DMA must be aborted; this code handles 1109 * all non-abort DMA completions. 1110 */ 1111 if (unlikely(req->td->dmadesc == 0)) { 1112 /* paranoia */ 1113 tmp = readl(&ep->dma->dmacount); 1114 if (tmp & DMA_BYTE_COUNT_MASK) 1115 break; 1116 /* single transfer mode */ 1117 dma_done(ep, req, tmp, 0); 1118 break; 1119 } else if (!ep->is_in && 1120 (req->req.length % ep->ep.maxpacket) != 0) { 1121 tmp = readl(&ep->regs->ep_stat); 1122 if (ep->dev->quirks & PLX_SUPERSPEED) 1123 return dma_done(ep, req, tmp, 0); 1124 1125 /* AVOID TROUBLE HERE by not issuing short reads from 1126 * your gadget driver. That helps avoids errata 0121, 1127 * 0122, and 0124; not all cases trigger the warning. 1128 */ 1129 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { 1130 ep_warn(ep->dev, "%s lost packet sync!\n", 1131 ep->ep.name); 1132 req->req.status = -EOVERFLOW; 1133 } else { 1134 tmp = readl(&ep->regs->ep_avail); 1135 if (tmp) { 1136 /* fifo gets flushed later */ 1137 ep->out_overflow = 1; 1138 ep_dbg(ep->dev, 1139 "%s dma, discard %d len %d\n", 1140 ep->ep.name, tmp, 1141 req->req.length); 1142 req->req.status = -EOVERFLOW; 1143 } 1144 } 1145 } 1146 dma_done(ep, req, tmp, 0); 1147 } 1148 } 1149 1150 static void restart_dma(struct net2280_ep *ep) 1151 { 1152 struct net2280_request *req; 1153 u32 dmactl = dmactl_default; 1154 1155 if (ep->stopped) 1156 return; 1157 req = list_entry(ep->queue.next, struct net2280_request, queue); 1158 1159 if (!use_dma_chaining) { 1160 start_dma(ep, req); 1161 return; 1162 } 1163 1164 /* the 2280 will be processing the queue unless queue hiccups after 1165 * the previous transfer: 1166 * IN: wanted automagic zlp, head doesn't (or vice versa) 1167 * DMA_FIFO_VALIDATE doesn't init from dma descriptors. 1168 * OUT: was "usb-short", we must restart. 1169 */ 1170 if (ep->is_in && !req->valid) { 1171 struct net2280_request *entry, *prev = NULL; 1172 int reqmode, done = 0; 1173 1174 ep_dbg(ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td); 1175 ep->in_fifo_validate = likely(req->req.zero || 1176 (req->req.length % ep->ep.maxpacket) != 0); 1177 if (ep->in_fifo_validate) 1178 dmactl |= BIT(DMA_FIFO_VALIDATE); 1179 list_for_each_entry(entry, &ep->queue, queue) { 1180 __le32 dmacount; 1181 1182 if (entry == req) 1183 continue; 1184 dmacount = entry->td->dmacount; 1185 if (!done) { 1186 reqmode = likely(entry->req.zero || 1187 (entry->req.length % ep->ep.maxpacket)); 1188 if (reqmode == ep->in_fifo_validate) { 1189 entry->valid = 1; 1190 dmacount |= valid_bit; 1191 entry->td->dmacount = dmacount; 1192 prev = entry; 1193 continue; 1194 } else { 1195 /* force a hiccup */ 1196 prev->td->dmacount |= dma_done_ie; 1197 done = 1; 1198 } 1199 } 1200 1201 /* walk the rest of the queue so unlinks behave */ 1202 entry->valid = 0; 1203 dmacount &= ~valid_bit; 1204 entry->td->dmacount = dmacount; 1205 prev = entry; 1206 } 1207 } 1208 1209 writel(0, &ep->dma->dmactl); 1210 start_queue(ep, dmactl, req->td_dma); 1211 } 1212 1213 static void abort_dma_228x(struct net2280_ep *ep) 1214 { 1215 /* abort the current transfer */ 1216 if (likely(!list_empty(&ep->queue))) { 1217 /* FIXME work around errata 0121, 0122, 0124 */ 1218 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 1219 spin_stop_dma(ep->dma); 1220 } else 1221 stop_dma(ep->dma); 1222 scan_dma_completions(ep); 1223 } 1224 1225 static void abort_dma_338x(struct net2280_ep *ep) 1226 { 1227 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 1228 spin_stop_dma(ep->dma); 1229 } 1230 1231 static void abort_dma(struct net2280_ep *ep) 1232 { 1233 if (ep->dev->quirks & PLX_LEGACY) 1234 return abort_dma_228x(ep); 1235 return abort_dma_338x(ep); 1236 } 1237 1238 /* dequeue ALL requests */ 1239 static void nuke(struct net2280_ep *ep) 1240 { 1241 struct net2280_request *req; 1242 1243 /* called with spinlock held */ 1244 ep->stopped = 1; 1245 if (ep->dma) 1246 abort_dma(ep); 1247 while (!list_empty(&ep->queue)) { 1248 req = list_entry(ep->queue.next, 1249 struct net2280_request, 1250 queue); 1251 done(ep, req, -ESHUTDOWN); 1252 } 1253 } 1254 1255 /* dequeue JUST ONE request */ 1256 static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1257 { 1258 struct net2280_ep *ep; 1259 struct net2280_request *req; 1260 unsigned long flags; 1261 u32 dmactl; 1262 int stopped; 1263 1264 ep = container_of(_ep, struct net2280_ep, ep); 1265 if (!_ep || (!ep->desc && ep->num != 0) || !_req) 1266 return -EINVAL; 1267 1268 spin_lock_irqsave(&ep->dev->lock, flags); 1269 stopped = ep->stopped; 1270 1271 /* quiesce dma while we patch the queue */ 1272 dmactl = 0; 1273 ep->stopped = 1; 1274 if (ep->dma) { 1275 dmactl = readl(&ep->dma->dmactl); 1276 /* WARNING erratum 0127 may kick in ... */ 1277 stop_dma(ep->dma); 1278 scan_dma_completions(ep); 1279 } 1280 1281 /* make sure it's still queued on this endpoint */ 1282 list_for_each_entry(req, &ep->queue, queue) { 1283 if (&req->req == _req) 1284 break; 1285 } 1286 if (&req->req != _req) { 1287 spin_unlock_irqrestore(&ep->dev->lock, flags); 1288 return -EINVAL; 1289 } 1290 1291 /* queue head may be partially complete. */ 1292 if (ep->queue.next == &req->queue) { 1293 if (ep->dma) { 1294 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name); 1295 _req->status = -ECONNRESET; 1296 abort_dma(ep); 1297 if (likely(ep->queue.next == &req->queue)) { 1298 /* NOTE: misreports single-transfer mode*/ 1299 req->td->dmacount = 0; /* invalidate */ 1300 dma_done(ep, req, 1301 readl(&ep->dma->dmacount), 1302 -ECONNRESET); 1303 } 1304 } else { 1305 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name); 1306 done(ep, req, -ECONNRESET); 1307 } 1308 req = NULL; 1309 1310 /* patch up hardware chaining data */ 1311 } else if (ep->dma && use_dma_chaining) { 1312 if (req->queue.prev == ep->queue.next) { 1313 writel(le32_to_cpu(req->td->dmadesc), 1314 &ep->dma->dmadesc); 1315 if (req->td->dmacount & dma_done_ie) 1316 writel(readl(&ep->dma->dmacount) | 1317 le32_to_cpu(dma_done_ie), 1318 &ep->dma->dmacount); 1319 } else { 1320 struct net2280_request *prev; 1321 1322 prev = list_entry(req->queue.prev, 1323 struct net2280_request, queue); 1324 prev->td->dmadesc = req->td->dmadesc; 1325 if (req->td->dmacount & dma_done_ie) 1326 prev->td->dmacount |= dma_done_ie; 1327 } 1328 } 1329 1330 if (req) 1331 done(ep, req, -ECONNRESET); 1332 ep->stopped = stopped; 1333 1334 if (ep->dma) { 1335 /* turn off dma on inactive queues */ 1336 if (list_empty(&ep->queue)) 1337 stop_dma(ep->dma); 1338 else if (!ep->stopped) { 1339 /* resume current request, or start new one */ 1340 if (req) 1341 writel(dmactl, &ep->dma->dmactl); 1342 else 1343 start_dma(ep, list_entry(ep->queue.next, 1344 struct net2280_request, queue)); 1345 } 1346 } 1347 1348 spin_unlock_irqrestore(&ep->dev->lock, flags); 1349 return 0; 1350 } 1351 1352 /*-------------------------------------------------------------------------*/ 1353 1354 static int net2280_fifo_status(struct usb_ep *_ep); 1355 1356 static int 1357 net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) 1358 { 1359 struct net2280_ep *ep; 1360 unsigned long flags; 1361 int retval = 0; 1362 1363 ep = container_of(_ep, struct net2280_ep, ep); 1364 if (!_ep || (!ep->desc && ep->num != 0)) 1365 return -EINVAL; 1366 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1367 return -ESHUTDOWN; 1368 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03) 1369 == USB_ENDPOINT_XFER_ISOC) 1370 return -EINVAL; 1371 1372 spin_lock_irqsave(&ep->dev->lock, flags); 1373 if (!list_empty(&ep->queue)) 1374 retval = -EAGAIN; 1375 else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) 1376 retval = -EAGAIN; 1377 else { 1378 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name, 1379 value ? "set" : "clear", 1380 wedged ? "wedge" : "halt"); 1381 /* set/clear, then synch memory views with the device */ 1382 if (value) { 1383 if (ep->num == 0) 1384 ep->dev->protocol_stall = 1; 1385 else 1386 set_halt(ep); 1387 if (wedged) 1388 ep->wedged = 1; 1389 } else { 1390 clear_halt(ep); 1391 if (ep->dev->quirks & PLX_SUPERSPEED && 1392 !list_empty(&ep->queue) && ep->td_dma) 1393 restart_dma(ep); 1394 ep->wedged = 0; 1395 } 1396 (void) readl(&ep->regs->ep_rsp); 1397 } 1398 spin_unlock_irqrestore(&ep->dev->lock, flags); 1399 1400 return retval; 1401 } 1402 1403 static int net2280_set_halt(struct usb_ep *_ep, int value) 1404 { 1405 return net2280_set_halt_and_wedge(_ep, value, 0); 1406 } 1407 1408 static int net2280_set_wedge(struct usb_ep *_ep) 1409 { 1410 if (!_ep || _ep->name == ep0name) 1411 return -EINVAL; 1412 return net2280_set_halt_and_wedge(_ep, 1, 1); 1413 } 1414 1415 static int net2280_fifo_status(struct usb_ep *_ep) 1416 { 1417 struct net2280_ep *ep; 1418 u32 avail; 1419 1420 ep = container_of(_ep, struct net2280_ep, ep); 1421 if (!_ep || (!ep->desc && ep->num != 0)) 1422 return -ENODEV; 1423 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1424 return -ESHUTDOWN; 1425 1426 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1); 1427 if (avail > ep->fifo_size) 1428 return -EOVERFLOW; 1429 if (ep->is_in) 1430 avail = ep->fifo_size - avail; 1431 return avail; 1432 } 1433 1434 static void net2280_fifo_flush(struct usb_ep *_ep) 1435 { 1436 struct net2280_ep *ep; 1437 1438 ep = container_of(_ep, struct net2280_ep, ep); 1439 if (!_ep || (!ep->desc && ep->num != 0)) 1440 return; 1441 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1442 return; 1443 1444 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 1445 (void) readl(&ep->regs->ep_rsp); 1446 } 1447 1448 static const struct usb_ep_ops net2280_ep_ops = { 1449 .enable = net2280_enable, 1450 .disable = net2280_disable, 1451 1452 .alloc_request = net2280_alloc_request, 1453 .free_request = net2280_free_request, 1454 1455 .queue = net2280_queue, 1456 .dequeue = net2280_dequeue, 1457 1458 .set_halt = net2280_set_halt, 1459 .set_wedge = net2280_set_wedge, 1460 .fifo_status = net2280_fifo_status, 1461 .fifo_flush = net2280_fifo_flush, 1462 }; 1463 1464 /*-------------------------------------------------------------------------*/ 1465 1466 static int net2280_get_frame(struct usb_gadget *_gadget) 1467 { 1468 struct net2280 *dev; 1469 unsigned long flags; 1470 u16 retval; 1471 1472 if (!_gadget) 1473 return -ENODEV; 1474 dev = container_of(_gadget, struct net2280, gadget); 1475 spin_lock_irqsave(&dev->lock, flags); 1476 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff; 1477 spin_unlock_irqrestore(&dev->lock, flags); 1478 return retval; 1479 } 1480 1481 static int net2280_wakeup(struct usb_gadget *_gadget) 1482 { 1483 struct net2280 *dev; 1484 u32 tmp; 1485 unsigned long flags; 1486 1487 if (!_gadget) 1488 return 0; 1489 dev = container_of(_gadget, struct net2280, gadget); 1490 1491 spin_lock_irqsave(&dev->lock, flags); 1492 tmp = readl(&dev->usb->usbctl); 1493 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE)) 1494 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat); 1495 spin_unlock_irqrestore(&dev->lock, flags); 1496 1497 /* pci writes may still be posted */ 1498 return 0; 1499 } 1500 1501 static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value) 1502 { 1503 struct net2280 *dev; 1504 u32 tmp; 1505 unsigned long flags; 1506 1507 if (!_gadget) 1508 return 0; 1509 dev = container_of(_gadget, struct net2280, gadget); 1510 1511 spin_lock_irqsave(&dev->lock, flags); 1512 tmp = readl(&dev->usb->usbctl); 1513 if (value) { 1514 tmp |= BIT(SELF_POWERED_STATUS); 1515 dev->selfpowered = 1; 1516 } else { 1517 tmp &= ~BIT(SELF_POWERED_STATUS); 1518 dev->selfpowered = 0; 1519 } 1520 writel(tmp, &dev->usb->usbctl); 1521 spin_unlock_irqrestore(&dev->lock, flags); 1522 1523 return 0; 1524 } 1525 1526 static int net2280_pullup(struct usb_gadget *_gadget, int is_on) 1527 { 1528 struct net2280 *dev; 1529 u32 tmp; 1530 unsigned long flags; 1531 1532 if (!_gadget) 1533 return -ENODEV; 1534 dev = container_of(_gadget, struct net2280, gadget); 1535 1536 spin_lock_irqsave(&dev->lock, flags); 1537 tmp = readl(&dev->usb->usbctl); 1538 dev->softconnect = (is_on != 0); 1539 if (is_on) 1540 tmp |= BIT(USB_DETECT_ENABLE); 1541 else 1542 tmp &= ~BIT(USB_DETECT_ENABLE); 1543 writel(tmp, &dev->usb->usbctl); 1544 spin_unlock_irqrestore(&dev->lock, flags); 1545 1546 return 0; 1547 } 1548 1549 static int net2280_start(struct usb_gadget *_gadget, 1550 struct usb_gadget_driver *driver); 1551 static int net2280_stop(struct usb_gadget *_gadget, 1552 struct usb_gadget_driver *driver); 1553 1554 static const struct usb_gadget_ops net2280_ops = { 1555 .get_frame = net2280_get_frame, 1556 .wakeup = net2280_wakeup, 1557 .set_selfpowered = net2280_set_selfpowered, 1558 .pullup = net2280_pullup, 1559 .udc_start = net2280_start, 1560 .udc_stop = net2280_stop, 1561 }; 1562 1563 /*-------------------------------------------------------------------------*/ 1564 1565 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1566 1567 /* FIXME move these into procfs, and use seq_file. 1568 * Sysfs _still_ doesn't behave for arbitrarily sized files, 1569 * and also doesn't help products using this with 2.4 kernels. 1570 */ 1571 1572 /* "function" sysfs attribute */ 1573 static ssize_t function_show(struct device *_dev, struct device_attribute *attr, 1574 char *buf) 1575 { 1576 struct net2280 *dev = dev_get_drvdata(_dev); 1577 1578 if (!dev->driver || !dev->driver->function || 1579 strlen(dev->driver->function) > PAGE_SIZE) 1580 return 0; 1581 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function); 1582 } 1583 static DEVICE_ATTR_RO(function); 1584 1585 static ssize_t registers_show(struct device *_dev, 1586 struct device_attribute *attr, char *buf) 1587 { 1588 struct net2280 *dev; 1589 char *next; 1590 unsigned size, t; 1591 unsigned long flags; 1592 int i; 1593 u32 t1, t2; 1594 const char *s; 1595 1596 dev = dev_get_drvdata(_dev); 1597 next = buf; 1598 size = PAGE_SIZE; 1599 spin_lock_irqsave(&dev->lock, flags); 1600 1601 if (dev->driver) 1602 s = dev->driver->driver.name; 1603 else 1604 s = "(none)"; 1605 1606 /* Main Control Registers */ 1607 t = scnprintf(next, size, "%s version " DRIVER_VERSION 1608 ", chiprev %04x, dma %s\n\n" 1609 "devinit %03x fifoctl %08x gadget '%s'\n" 1610 "pci irqenb0 %02x irqenb1 %08x " 1611 "irqstat0 %04x irqstat1 %08x\n", 1612 driver_name, dev->chiprev, 1613 use_dma 1614 ? (use_dma_chaining ? "chaining" : "enabled") 1615 : "disabled", 1616 readl(&dev->regs->devinit), 1617 readl(&dev->regs->fifoctl), 1618 s, 1619 readl(&dev->regs->pciirqenb0), 1620 readl(&dev->regs->pciirqenb1), 1621 readl(&dev->regs->irqstat0), 1622 readl(&dev->regs->irqstat1)); 1623 size -= t; 1624 next += t; 1625 1626 /* USB Control Registers */ 1627 t1 = readl(&dev->usb->usbctl); 1628 t2 = readl(&dev->usb->usbstat); 1629 if (t1 & BIT(VBUS_PIN)) { 1630 if (t2 & BIT(HIGH_SPEED)) 1631 s = "high speed"; 1632 else if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1633 s = "powered"; 1634 else 1635 s = "full speed"; 1636 /* full speed bit (6) not working?? */ 1637 } else 1638 s = "not attached"; 1639 t = scnprintf(next, size, 1640 "stdrsp %08x usbctl %08x usbstat %08x " 1641 "addr 0x%02x (%s)\n", 1642 readl(&dev->usb->stdrsp), t1, t2, 1643 readl(&dev->usb->ouraddr), s); 1644 size -= t; 1645 next += t; 1646 1647 /* PCI Master Control Registers */ 1648 1649 /* DMA Control Registers */ 1650 1651 /* Configurable EP Control Registers */ 1652 for (i = 0; i < dev->n_ep; i++) { 1653 struct net2280_ep *ep; 1654 1655 ep = &dev->ep[i]; 1656 if (i && !ep->desc) 1657 continue; 1658 1659 t1 = readl(&ep->cfg->ep_cfg); 1660 t2 = readl(&ep->regs->ep_rsp) & 0xff; 1661 t = scnprintf(next, size, 1662 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s" 1663 "irqenb %02x\n", 1664 ep->ep.name, t1, t2, 1665 (t2 & BIT(CLEAR_NAK_OUT_PACKETS)) 1666 ? "NAK " : "", 1667 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE)) 1668 ? "hide " : "", 1669 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR)) 1670 ? "CRC " : "", 1671 (t2 & BIT(CLEAR_INTERRUPT_MODE)) 1672 ? "interrupt " : "", 1673 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)) 1674 ? "status " : "", 1675 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE)) 1676 ? "NAKmode " : "", 1677 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE)) 1678 ? "DATA1 " : "DATA0 ", 1679 (t2 & BIT(CLEAR_ENDPOINT_HALT)) 1680 ? "HALT " : "", 1681 readl(&ep->regs->ep_irqenb)); 1682 size -= t; 1683 next += t; 1684 1685 t = scnprintf(next, size, 1686 "\tstat %08x avail %04x " 1687 "(ep%d%s-%s)%s\n", 1688 readl(&ep->regs->ep_stat), 1689 readl(&ep->regs->ep_avail), 1690 t1 & 0x0f, DIR_STRING(t1), 1691 type_string(t1 >> 8), 1692 ep->stopped ? "*" : ""); 1693 size -= t; 1694 next += t; 1695 1696 if (!ep->dma) 1697 continue; 1698 1699 t = scnprintf(next, size, 1700 " dma\tctl %08x stat %08x count %08x\n" 1701 "\taddr %08x desc %08x\n", 1702 readl(&ep->dma->dmactl), 1703 readl(&ep->dma->dmastat), 1704 readl(&ep->dma->dmacount), 1705 readl(&ep->dma->dmaaddr), 1706 readl(&ep->dma->dmadesc)); 1707 size -= t; 1708 next += t; 1709 1710 } 1711 1712 /* Indexed Registers (none yet) */ 1713 1714 /* Statistics */ 1715 t = scnprintf(next, size, "\nirqs: "); 1716 size -= t; 1717 next += t; 1718 for (i = 0; i < dev->n_ep; i++) { 1719 struct net2280_ep *ep; 1720 1721 ep = &dev->ep[i]; 1722 if (i && !ep->irqs) 1723 continue; 1724 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs); 1725 size -= t; 1726 next += t; 1727 1728 } 1729 t = scnprintf(next, size, "\n"); 1730 size -= t; 1731 next += t; 1732 1733 spin_unlock_irqrestore(&dev->lock, flags); 1734 1735 return PAGE_SIZE - size; 1736 } 1737 static DEVICE_ATTR_RO(registers); 1738 1739 static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, 1740 char *buf) 1741 { 1742 struct net2280 *dev; 1743 char *next; 1744 unsigned size; 1745 unsigned long flags; 1746 int i; 1747 1748 dev = dev_get_drvdata(_dev); 1749 next = buf; 1750 size = PAGE_SIZE; 1751 spin_lock_irqsave(&dev->lock, flags); 1752 1753 for (i = 0; i < dev->n_ep; i++) { 1754 struct net2280_ep *ep = &dev->ep[i]; 1755 struct net2280_request *req; 1756 int t; 1757 1758 if (i != 0) { 1759 const struct usb_endpoint_descriptor *d; 1760 1761 d = ep->desc; 1762 if (!d) 1763 continue; 1764 t = d->bEndpointAddress; 1765 t = scnprintf(next, size, 1766 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n", 1767 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, 1768 (t & USB_DIR_IN) ? "in" : "out", 1769 type_string(d->bmAttributes), 1770 usb_endpoint_maxp(d) & 0x1fff, 1771 ep->dma ? "dma" : "pio", ep->fifo_size 1772 ); 1773 } else /* ep0 should only have one transfer queued */ 1774 t = scnprintf(next, size, "ep0 max 64 pio %s\n", 1775 ep->is_in ? "in" : "out"); 1776 if (t <= 0 || t > size) 1777 goto done; 1778 size -= t; 1779 next += t; 1780 1781 if (list_empty(&ep->queue)) { 1782 t = scnprintf(next, size, "\t(nothing queued)\n"); 1783 if (t <= 0 || t > size) 1784 goto done; 1785 size -= t; 1786 next += t; 1787 continue; 1788 } 1789 list_for_each_entry(req, &ep->queue, queue) { 1790 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) 1791 t = scnprintf(next, size, 1792 "\treq %p len %d/%d " 1793 "buf %p (dmacount %08x)\n", 1794 &req->req, req->req.actual, 1795 req->req.length, req->req.buf, 1796 readl(&ep->dma->dmacount)); 1797 else 1798 t = scnprintf(next, size, 1799 "\treq %p len %d/%d buf %p\n", 1800 &req->req, req->req.actual, 1801 req->req.length, req->req.buf); 1802 if (t <= 0 || t > size) 1803 goto done; 1804 size -= t; 1805 next += t; 1806 1807 if (ep->dma) { 1808 struct net2280_dma *td; 1809 1810 td = req->td; 1811 t = scnprintf(next, size, "\t td %08x " 1812 " count %08x buf %08x desc %08x\n", 1813 (u32) req->td_dma, 1814 le32_to_cpu(td->dmacount), 1815 le32_to_cpu(td->dmaaddr), 1816 le32_to_cpu(td->dmadesc)); 1817 if (t <= 0 || t > size) 1818 goto done; 1819 size -= t; 1820 next += t; 1821 } 1822 } 1823 } 1824 1825 done: 1826 spin_unlock_irqrestore(&dev->lock, flags); 1827 return PAGE_SIZE - size; 1828 } 1829 static DEVICE_ATTR_RO(queues); 1830 1831 1832 #else 1833 1834 #define device_create_file(a, b) (0) 1835 #define device_remove_file(a, b) do { } while (0) 1836 1837 #endif 1838 1839 /*-------------------------------------------------------------------------*/ 1840 1841 /* another driver-specific mode might be a request type doing dma 1842 * to/from another device fifo instead of to/from memory. 1843 */ 1844 1845 static void set_fifo_mode(struct net2280 *dev, int mode) 1846 { 1847 /* keeping high bits preserves BAR2 */ 1848 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl); 1849 1850 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */ 1851 INIT_LIST_HEAD(&dev->gadget.ep_list); 1852 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1853 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1854 switch (mode) { 1855 case 0: 1856 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1857 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list); 1858 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1859 break; 1860 case 1: 1861 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048; 1862 break; 1863 case 2: 1864 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1865 dev->ep[1].fifo_size = 2048; 1866 dev->ep[2].fifo_size = 1024; 1867 break; 1868 } 1869 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */ 1870 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list); 1871 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list); 1872 } 1873 1874 static void defect7374_disable_data_eps(struct net2280 *dev) 1875 { 1876 /* 1877 * For Defect 7374, disable data EPs (and more): 1878 * - This phase undoes the earlier phase of the Defect 7374 workaround, 1879 * returing ep regs back to normal. 1880 */ 1881 struct net2280_ep *ep; 1882 int i; 1883 unsigned char ep_sel; 1884 u32 tmp_reg; 1885 1886 for (i = 1; i < 5; i++) { 1887 ep = &dev->ep[i]; 1888 writel(0, &ep->cfg->ep_cfg); 1889 } 1890 1891 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */ 1892 for (i = 0; i < 6; i++) 1893 writel(0, &dev->dep[i].dep_cfg); 1894 1895 for (ep_sel = 0; ep_sel <= 21; ep_sel++) { 1896 /* Select an endpoint for subsequent operations: */ 1897 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); 1898 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl); 1899 1900 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) || 1901 ep_sel == 18 || ep_sel == 20) 1902 continue; 1903 1904 /* Change settings on some selected endpoints */ 1905 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4); 1906 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR); 1907 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4); 1908 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); 1909 tmp_reg |= BIT(EP_INITIALIZED); 1910 writel(tmp_reg, &dev->plregs->pl_ep_ctrl); 1911 } 1912 } 1913 1914 static void defect7374_enable_data_eps_zero(struct net2280 *dev) 1915 { 1916 u32 tmp = 0, tmp_reg; 1917 u32 fsmvalue, scratch; 1918 int i; 1919 unsigned char ep_sel; 1920 1921 scratch = get_idx_reg(dev->regs, SCRATCH); 1922 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); 1923 scratch &= ~(0xf << DEFECT7374_FSM_FIELD); 1924 1925 /*See if firmware needs to set up for workaround*/ 1926 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { 1927 ep_warn(dev, "Operate Defect 7374 workaround soft this time"); 1928 ep_warn(dev, "It will operate on cold-reboot and SS connect"); 1929 1930 /*GPEPs:*/ 1931 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) | 1932 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) | 1933 ((dev->enhanced_mode) ? 1934 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) | 1935 BIT(IN_ENDPOINT_ENABLE)); 1936 1937 for (i = 1; i < 5; i++) 1938 writel(tmp, &dev->ep[i].cfg->ep_cfg); 1939 1940 /* CSRIN, PCIIN, STATIN, RCIN*/ 1941 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE)); 1942 writel(tmp, &dev->dep[1].dep_cfg); 1943 writel(tmp, &dev->dep[3].dep_cfg); 1944 writel(tmp, &dev->dep[4].dep_cfg); 1945 writel(tmp, &dev->dep[5].dep_cfg); 1946 1947 /*Implemented for development and debug. 1948 * Can be refined/tuned later.*/ 1949 for (ep_sel = 0; ep_sel <= 21; ep_sel++) { 1950 /* Select an endpoint for subsequent operations: */ 1951 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); 1952 writel(((tmp_reg & ~0x1f) | ep_sel), 1953 &dev->plregs->pl_ep_ctrl); 1954 1955 if (ep_sel == 1) { 1956 tmp = 1957 (readl(&dev->plregs->pl_ep_ctrl) | 1958 BIT(CLEAR_ACK_ERROR_CODE) | 0); 1959 writel(tmp, &dev->plregs->pl_ep_ctrl); 1960 continue; 1961 } 1962 1963 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) || 1964 ep_sel == 18 || ep_sel == 20) 1965 continue; 1966 1967 tmp = (readl(&dev->plregs->pl_ep_cfg_4) | 1968 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0); 1969 writel(tmp, &dev->plregs->pl_ep_cfg_4); 1970 1971 tmp = readl(&dev->plregs->pl_ep_ctrl) & 1972 ~BIT(EP_INITIALIZED); 1973 writel(tmp, &dev->plregs->pl_ep_ctrl); 1974 1975 } 1976 1977 /* Set FSM to focus on the first Control Read: 1978 * - Tip: Connection speed is known upon the first 1979 * setup request.*/ 1980 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ; 1981 set_idx_reg(dev->regs, SCRATCH, scratch); 1982 1983 } else{ 1984 ep_warn(dev, "Defect 7374 workaround soft will NOT operate"); 1985 ep_warn(dev, "It will operate on cold-reboot and SS connect"); 1986 } 1987 } 1988 1989 /* keeping it simple: 1990 * - one bus driver, initted first; 1991 * - one function driver, initted second 1992 * 1993 * most of the work to support multiple net2280 controllers would 1994 * be to associate this gadget driver (yes?) with all of them, or 1995 * perhaps to bind specific drivers to specific devices. 1996 */ 1997 1998 static void usb_reset_228x(struct net2280 *dev) 1999 { 2000 u32 tmp; 2001 2002 dev->gadget.speed = USB_SPEED_UNKNOWN; 2003 (void) readl(&dev->usb->usbctl); 2004 2005 net2280_led_init(dev); 2006 2007 /* disable automatic responses, and irqs */ 2008 writel(0, &dev->usb->stdrsp); 2009 writel(0, &dev->regs->pciirqenb0); 2010 writel(0, &dev->regs->pciirqenb1); 2011 2012 /* clear old dma and irq state */ 2013 for (tmp = 0; tmp < 4; tmp++) { 2014 struct net2280_ep *ep = &dev->ep[tmp + 1]; 2015 if (ep->dma) 2016 abort_dma(ep); 2017 } 2018 2019 writel(~0, &dev->regs->irqstat0), 2020 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1), 2021 2022 /* reset, and enable pci */ 2023 tmp = readl(&dev->regs->devinit) | 2024 BIT(PCI_ENABLE) | 2025 BIT(FIFO_SOFT_RESET) | 2026 BIT(USB_SOFT_RESET) | 2027 BIT(M8051_RESET); 2028 writel(tmp, &dev->regs->devinit); 2029 2030 /* standard fifo and endpoint allocations */ 2031 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0); 2032 } 2033 2034 static void usb_reset_338x(struct net2280 *dev) 2035 { 2036 u32 tmp; 2037 u32 fsmvalue; 2038 2039 dev->gadget.speed = USB_SPEED_UNKNOWN; 2040 (void)readl(&dev->usb->usbctl); 2041 2042 net2280_led_init(dev); 2043 2044 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 2045 (0xf << DEFECT7374_FSM_FIELD); 2046 2047 /* See if firmware needs to set up for workaround: */ 2048 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { 2049 ep_info(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__, 2050 fsmvalue); 2051 } else { 2052 /* disable automatic responses, and irqs */ 2053 writel(0, &dev->usb->stdrsp); 2054 writel(0, &dev->regs->pciirqenb0); 2055 writel(0, &dev->regs->pciirqenb1); 2056 } 2057 2058 /* clear old dma and irq state */ 2059 for (tmp = 0; tmp < 4; tmp++) { 2060 struct net2280_ep *ep = &dev->ep[tmp + 1]; 2061 2062 if (ep->dma) 2063 abort_dma(ep); 2064 } 2065 2066 writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1); 2067 2068 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) { 2069 /* reset, and enable pci */ 2070 tmp = readl(&dev->regs->devinit) | 2071 BIT(PCI_ENABLE) | 2072 BIT(FIFO_SOFT_RESET) | 2073 BIT(USB_SOFT_RESET) | 2074 BIT(M8051_RESET); 2075 2076 writel(tmp, &dev->regs->devinit); 2077 } 2078 2079 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */ 2080 INIT_LIST_HEAD(&dev->gadget.ep_list); 2081 2082 for (tmp = 1; tmp < dev->n_ep; tmp++) 2083 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); 2084 2085 } 2086 2087 static void usb_reset(struct net2280 *dev) 2088 { 2089 if (dev->quirks & PLX_LEGACY) 2090 return usb_reset_228x(dev); 2091 return usb_reset_338x(dev); 2092 } 2093 2094 static void usb_reinit_228x(struct net2280 *dev) 2095 { 2096 u32 tmp; 2097 int init_dma; 2098 2099 /* use_dma changes are ignored till next device re-init */ 2100 init_dma = use_dma; 2101 2102 /* basic endpoint init */ 2103 for (tmp = 0; tmp < 7; tmp++) { 2104 struct net2280_ep *ep = &dev->ep[tmp]; 2105 2106 ep->ep.name = ep_name[tmp]; 2107 ep->dev = dev; 2108 ep->num = tmp; 2109 2110 if (tmp > 0 && tmp <= 4) { 2111 ep->fifo_size = 1024; 2112 if (init_dma) 2113 ep->dma = &dev->dma[tmp - 1]; 2114 } else 2115 ep->fifo_size = 64; 2116 ep->regs = &dev->epregs[tmp]; 2117 ep->cfg = &dev->epregs[tmp]; 2118 ep_reset_228x(dev->regs, ep); 2119 } 2120 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 2121 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64); 2122 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64); 2123 2124 dev->gadget.ep0 = &dev->ep[0].ep; 2125 dev->ep[0].stopped = 0; 2126 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2127 2128 /* we want to prevent lowlevel/insecure access from the USB host, 2129 * but erratum 0119 means this enable bit is ignored 2130 */ 2131 for (tmp = 0; tmp < 5; tmp++) 2132 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg); 2133 } 2134 2135 static void usb_reinit_338x(struct net2280 *dev) 2136 { 2137 int init_dma; 2138 int i; 2139 u32 tmp, val; 2140 u32 fsmvalue; 2141 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 }; 2142 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00, 2143 0x00, 0xC0, 0x00, 0xC0 }; 2144 2145 /* use_dma changes are ignored till next device re-init */ 2146 init_dma = use_dma; 2147 2148 /* basic endpoint init */ 2149 for (i = 0; i < dev->n_ep; i++) { 2150 struct net2280_ep *ep = &dev->ep[i]; 2151 2152 ep->ep.name = ep_name[i]; 2153 ep->dev = dev; 2154 ep->num = i; 2155 2156 if (i > 0 && i <= 4 && init_dma) 2157 ep->dma = &dev->dma[i - 1]; 2158 2159 if (dev->enhanced_mode) { 2160 ep->cfg = &dev->epregs[ne[i]]; 2161 ep->regs = (struct net2280_ep_regs __iomem *) 2162 (((void __iomem *)&dev->epregs[ne[i]]) + 2163 ep_reg_addr[i]); 2164 ep->fiforegs = &dev->fiforegs[i]; 2165 } else { 2166 ep->cfg = &dev->epregs[i]; 2167 ep->regs = &dev->epregs[i]; 2168 ep->fiforegs = &dev->fiforegs[i]; 2169 } 2170 2171 ep->fifo_size = (i != 0) ? 2048 : 512; 2172 2173 ep_reset_338x(dev->regs, ep); 2174 } 2175 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); 2176 2177 dev->gadget.ep0 = &dev->ep[0].ep; 2178 dev->ep[0].stopped = 0; 2179 2180 /* Link layer set up */ 2181 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 2182 (0xf << DEFECT7374_FSM_FIELD); 2183 2184 /* See if driver needs to set up for workaround: */ 2185 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) 2186 ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", 2187 __func__, fsmvalue); 2188 else { 2189 tmp = readl(&dev->usb_ext->usbctl2) & 2190 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE)); 2191 writel(tmp, &dev->usb_ext->usbctl2); 2192 } 2193 2194 /* Hardware Defect and Workaround */ 2195 val = readl(&dev->ll_lfps_regs->ll_lfps_5); 2196 val &= ~(0xf << TIMER_LFPS_6US); 2197 val |= 0x5 << TIMER_LFPS_6US; 2198 writel(val, &dev->ll_lfps_regs->ll_lfps_5); 2199 2200 val = readl(&dev->ll_lfps_regs->ll_lfps_6); 2201 val &= ~(0xffff << TIMER_LFPS_80US); 2202 val |= 0x0100 << TIMER_LFPS_80US; 2203 writel(val, &dev->ll_lfps_regs->ll_lfps_6); 2204 2205 /* 2206 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB 2207 * Hot Reset Exit Handshake may Fail in Specific Case using 2208 * Default Register Settings. Workaround for Enumeration test. 2209 */ 2210 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); 2211 val &= ~(0x1f << HOT_TX_NORESET_TS2); 2212 val |= 0x10 << HOT_TX_NORESET_TS2; 2213 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); 2214 2215 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); 2216 val &= ~(0x1f << HOT_RX_RESET_TS2); 2217 val |= 0x3 << HOT_RX_RESET_TS2; 2218 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); 2219 2220 /* 2221 * Set Recovery Idle to Recover bit: 2222 * - On SS connections, setting Recovery Idle to Recover Fmw improves 2223 * link robustness with various hosts and hubs. 2224 * - It is safe to set for all connection speeds; all chip revisions. 2225 * - R-M-W to leave other bits undisturbed. 2226 * - Reference PLX TT-7372 2227 */ 2228 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); 2229 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW); 2230 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); 2231 2232 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2233 2234 /* disable dedicated endpoints */ 2235 writel(0x0D, &dev->dep[0].dep_cfg); 2236 writel(0x0D, &dev->dep[1].dep_cfg); 2237 writel(0x0E, &dev->dep[2].dep_cfg); 2238 writel(0x0E, &dev->dep[3].dep_cfg); 2239 writel(0x0F, &dev->dep[4].dep_cfg); 2240 writel(0x0C, &dev->dep[5].dep_cfg); 2241 } 2242 2243 static void usb_reinit(struct net2280 *dev) 2244 { 2245 if (dev->quirks & PLX_LEGACY) 2246 return usb_reinit_228x(dev); 2247 return usb_reinit_338x(dev); 2248 } 2249 2250 static void ep0_start_228x(struct net2280 *dev) 2251 { 2252 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 2253 BIT(CLEAR_NAK_OUT_PACKETS) | 2254 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), 2255 &dev->epregs[0].ep_rsp); 2256 2257 /* 2258 * hardware optionally handles a bunch of standard requests 2259 * that the API hides from drivers anyway. have it do so. 2260 * endpoint status/features are handled in software, to 2261 * help pass tests for some dubious behavior. 2262 */ 2263 writel(BIT(SET_TEST_MODE) | 2264 BIT(SET_ADDRESS) | 2265 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) | 2266 BIT(GET_DEVICE_STATUS) | 2267 BIT(GET_INTERFACE_STATUS), 2268 &dev->usb->stdrsp); 2269 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2270 BIT(SELF_POWERED_USB_DEVICE) | 2271 BIT(REMOTE_WAKEUP_SUPPORT) | 2272 (dev->softconnect << USB_DETECT_ENABLE) | 2273 BIT(SELF_POWERED_STATUS), 2274 &dev->usb->usbctl); 2275 2276 /* enable irqs so we can see ep0 and general operation */ 2277 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2278 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2279 &dev->regs->pciirqenb0); 2280 writel(BIT(PCI_INTERRUPT_ENABLE) | 2281 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2282 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2283 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) | 2284 BIT(VBUS_INTERRUPT_ENABLE) | 2285 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2286 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE), 2287 &dev->regs->pciirqenb1); 2288 2289 /* don't leave any writes posted */ 2290 (void) readl(&dev->usb->usbctl); 2291 } 2292 2293 static void ep0_start_338x(struct net2280 *dev) 2294 { 2295 u32 fsmvalue; 2296 2297 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 2298 (0xf << DEFECT7374_FSM_FIELD); 2299 2300 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) 2301 ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", __func__, 2302 fsmvalue); 2303 else 2304 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) | 2305 BIT(SET_EP_HIDE_STATUS_PHASE), 2306 &dev->epregs[0].ep_rsp); 2307 2308 /* 2309 * hardware optionally handles a bunch of standard requests 2310 * that the API hides from drivers anyway. have it do so. 2311 * endpoint status/features are handled in software, to 2312 * help pass tests for some dubious behavior. 2313 */ 2314 writel(BIT(SET_ISOCHRONOUS_DELAY) | 2315 BIT(SET_SEL) | 2316 BIT(SET_TEST_MODE) | 2317 BIT(SET_ADDRESS) | 2318 BIT(GET_INTERFACE_STATUS) | 2319 BIT(GET_DEVICE_STATUS), 2320 &dev->usb->stdrsp); 2321 dev->wakeup_enable = 1; 2322 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2323 (dev->softconnect << USB_DETECT_ENABLE) | 2324 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2325 &dev->usb->usbctl); 2326 2327 /* enable irqs so we can see ep0 and general operation */ 2328 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2329 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2330 &dev->regs->pciirqenb0); 2331 writel(BIT(PCI_INTERRUPT_ENABLE) | 2332 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2333 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) | 2334 BIT(VBUS_INTERRUPT_ENABLE), 2335 &dev->regs->pciirqenb1); 2336 2337 /* don't leave any writes posted */ 2338 (void)readl(&dev->usb->usbctl); 2339 } 2340 2341 static void ep0_start(struct net2280 *dev) 2342 { 2343 if (dev->quirks & PLX_LEGACY) 2344 return ep0_start_228x(dev); 2345 return ep0_start_338x(dev); 2346 } 2347 2348 /* when a driver is successfully registered, it will receive 2349 * control requests including set_configuration(), which enables 2350 * non-control requests. then usb traffic follows until a 2351 * disconnect is reported. then a host may connect again, or 2352 * the driver might get unbound. 2353 */ 2354 static int net2280_start(struct usb_gadget *_gadget, 2355 struct usb_gadget_driver *driver) 2356 { 2357 struct net2280 *dev; 2358 int retval; 2359 unsigned i; 2360 2361 /* insist on high speed support from the driver, since 2362 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) 2363 * "must not be used in normal operation" 2364 */ 2365 if (!driver || driver->max_speed < USB_SPEED_HIGH || 2366 !driver->setup) 2367 return -EINVAL; 2368 2369 dev = container_of(_gadget, struct net2280, gadget); 2370 2371 for (i = 0; i < dev->n_ep; i++) 2372 dev->ep[i].irqs = 0; 2373 2374 /* hook up the driver ... */ 2375 dev->softconnect = 1; 2376 driver->driver.bus = NULL; 2377 dev->driver = driver; 2378 2379 retval = device_create_file(&dev->pdev->dev, &dev_attr_function); 2380 if (retval) 2381 goto err_unbind; 2382 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues); 2383 if (retval) 2384 goto err_func; 2385 2386 /* Enable force-full-speed testing mode, if desired */ 2387 if (full_speed && (dev->quirks & PLX_LEGACY)) 2388 writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag); 2389 2390 /* ... then enable host detection and ep0; and we're ready 2391 * for set_configuration as well as eventual disconnect. 2392 */ 2393 net2280_led_active(dev, 1); 2394 2395 if (dev->quirks & PLX_SUPERSPEED) 2396 defect7374_enable_data_eps_zero(dev); 2397 2398 ep0_start(dev); 2399 2400 ep_dbg(dev, "%s ready, usbctl %08x stdrsp %08x\n", 2401 driver->driver.name, 2402 readl(&dev->usb->usbctl), 2403 readl(&dev->usb->stdrsp)); 2404 2405 /* pci writes may still be posted */ 2406 return 0; 2407 2408 err_func: 2409 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2410 err_unbind: 2411 dev->driver = NULL; 2412 return retval; 2413 } 2414 2415 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) 2416 { 2417 int i; 2418 2419 /* don't disconnect if it's not connected */ 2420 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 2421 driver = NULL; 2422 2423 /* stop hardware; prevent new request submissions; 2424 * and kill any outstanding requests. 2425 */ 2426 usb_reset(dev); 2427 for (i = 0; i < dev->n_ep; i++) 2428 nuke(&dev->ep[i]); 2429 2430 /* report disconnect; the driver is already quiesced */ 2431 if (driver) { 2432 spin_unlock(&dev->lock); 2433 driver->disconnect(&dev->gadget); 2434 spin_lock(&dev->lock); 2435 } 2436 2437 usb_reinit(dev); 2438 } 2439 2440 static int net2280_stop(struct usb_gadget *_gadget, 2441 struct usb_gadget_driver *driver) 2442 { 2443 struct net2280 *dev; 2444 unsigned long flags; 2445 2446 dev = container_of(_gadget, struct net2280, gadget); 2447 2448 spin_lock_irqsave(&dev->lock, flags); 2449 stop_activity(dev, driver); 2450 spin_unlock_irqrestore(&dev->lock, flags); 2451 2452 dev->driver = NULL; 2453 2454 net2280_led_active(dev, 0); 2455 2456 /* Disable full-speed test mode */ 2457 if (dev->quirks & PLX_LEGACY) 2458 writel(0, &dev->usb->xcvrdiag); 2459 2460 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2461 device_remove_file(&dev->pdev->dev, &dev_attr_queues); 2462 2463 ep_dbg(dev, "unregistered driver '%s'\n", 2464 driver ? driver->driver.name : ""); 2465 2466 return 0; 2467 } 2468 2469 /*-------------------------------------------------------------------------*/ 2470 2471 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq. 2472 * also works for dma-capable endpoints, in pio mode or just 2473 * to manually advance the queue after short OUT transfers. 2474 */ 2475 static void handle_ep_small(struct net2280_ep *ep) 2476 { 2477 struct net2280_request *req; 2478 u32 t; 2479 /* 0 error, 1 mid-data, 2 done */ 2480 int mode = 1; 2481 2482 if (!list_empty(&ep->queue)) 2483 req = list_entry(ep->queue.next, 2484 struct net2280_request, queue); 2485 else 2486 req = NULL; 2487 2488 /* ack all, and handle what we care about */ 2489 t = readl(&ep->regs->ep_stat); 2490 ep->irqs++; 2491 #if 0 2492 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n", 2493 ep->ep.name, t, req ? &req->req : 0); 2494 #endif 2495 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) 2496 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat); 2497 else 2498 /* Added for 2282 */ 2499 writel(t, &ep->regs->ep_stat); 2500 2501 /* for ep0, monitor token irqs to catch data stage length errors 2502 * and to synchronize on status. 2503 * 2504 * also, to defer reporting of protocol stalls ... here's where 2505 * data or status first appears, handling stalls here should never 2506 * cause trouble on the host side.. 2507 * 2508 * control requests could be slightly faster without token synch for 2509 * status, but status can jam up that way. 2510 */ 2511 if (unlikely(ep->num == 0)) { 2512 if (ep->is_in) { 2513 /* status; stop NAKing */ 2514 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) { 2515 if (ep->dev->protocol_stall) { 2516 ep->stopped = 1; 2517 set_halt(ep); 2518 } 2519 if (!req) 2520 allow_status(ep); 2521 mode = 2; 2522 /* reply to extra IN data tokens with a zlp */ 2523 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2524 if (ep->dev->protocol_stall) { 2525 ep->stopped = 1; 2526 set_halt(ep); 2527 mode = 2; 2528 } else if (ep->responded && 2529 !req && !ep->stopped) 2530 write_fifo(ep, NULL); 2531 } 2532 } else { 2533 /* status; stop NAKing */ 2534 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2535 if (ep->dev->protocol_stall) { 2536 ep->stopped = 1; 2537 set_halt(ep); 2538 } 2539 mode = 2; 2540 /* an extra OUT token is an error */ 2541 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) && 2542 req && 2543 req->req.actual == req->req.length) || 2544 (ep->responded && !req)) { 2545 ep->dev->protocol_stall = 1; 2546 set_halt(ep); 2547 ep->stopped = 1; 2548 if (req) 2549 done(ep, req, -EOVERFLOW); 2550 req = NULL; 2551 } 2552 } 2553 } 2554 2555 if (unlikely(!req)) 2556 return; 2557 2558 /* manual DMA queue advance after short OUT */ 2559 if (likely(ep->dma)) { 2560 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 2561 u32 count; 2562 int stopped = ep->stopped; 2563 2564 /* TRANSFERRED works around OUT_DONE erratum 0112. 2565 * we expect (N <= maxpacket) bytes; host wrote M. 2566 * iff (M < N) we won't ever see a DMA interrupt. 2567 */ 2568 ep->stopped = 1; 2569 for (count = 0; ; t = readl(&ep->regs->ep_stat)) { 2570 2571 /* any preceding dma transfers must finish. 2572 * dma handles (M >= N), may empty the queue 2573 */ 2574 scan_dma_completions(ep); 2575 if (unlikely(list_empty(&ep->queue) || 2576 ep->out_overflow)) { 2577 req = NULL; 2578 break; 2579 } 2580 req = list_entry(ep->queue.next, 2581 struct net2280_request, queue); 2582 2583 /* here either (M < N), a "real" short rx; 2584 * or (M == N) and the queue didn't empty 2585 */ 2586 if (likely(t & BIT(FIFO_EMPTY))) { 2587 count = readl(&ep->dma->dmacount); 2588 count &= DMA_BYTE_COUNT_MASK; 2589 if (readl(&ep->dma->dmadesc) 2590 != req->td_dma) 2591 req = NULL; 2592 break; 2593 } 2594 udelay(1); 2595 } 2596 2597 /* stop DMA, leave ep NAKing */ 2598 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 2599 spin_stop_dma(ep->dma); 2600 2601 if (likely(req)) { 2602 req->td->dmacount = 0; 2603 t = readl(&ep->regs->ep_avail); 2604 dma_done(ep, req, count, 2605 (ep->out_overflow || t) 2606 ? -EOVERFLOW : 0); 2607 } 2608 2609 /* also flush to prevent erratum 0106 trouble */ 2610 if (unlikely(ep->out_overflow || 2611 (ep->dev->chiprev == 0x0100 && 2612 ep->dev->gadget.speed 2613 == USB_SPEED_FULL))) { 2614 out_flush(ep); 2615 ep->out_overflow = 0; 2616 } 2617 2618 /* (re)start dma if needed, stop NAKing */ 2619 ep->stopped = stopped; 2620 if (!list_empty(&ep->queue)) 2621 restart_dma(ep); 2622 } else 2623 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n", 2624 ep->ep.name, t); 2625 return; 2626 2627 /* data packet(s) received (in the fifo, OUT) */ 2628 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) { 2629 if (read_fifo(ep, req) && ep->num != 0) 2630 mode = 2; 2631 2632 /* data packet(s) transmitted (IN) */ 2633 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) { 2634 unsigned len; 2635 2636 len = req->req.length - req->req.actual; 2637 if (len > ep->ep.maxpacket) 2638 len = ep->ep.maxpacket; 2639 req->req.actual += len; 2640 2641 /* if we wrote it all, we're usually done */ 2642 /* send zlps until the status stage */ 2643 if ((req->req.actual == req->req.length) && 2644 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) 2645 mode = 2; 2646 2647 /* there was nothing to do ... */ 2648 } else if (mode == 1) 2649 return; 2650 2651 /* done */ 2652 if (mode == 2) { 2653 /* stream endpoints often resubmit/unlink in completion */ 2654 done(ep, req, 0); 2655 2656 /* maybe advance queue to next request */ 2657 if (ep->num == 0) { 2658 /* NOTE: net2280 could let gadget driver start the 2659 * status stage later. since not all controllers let 2660 * them control that, the api doesn't (yet) allow it. 2661 */ 2662 if (!ep->stopped) 2663 allow_status(ep); 2664 req = NULL; 2665 } else { 2666 if (!list_empty(&ep->queue) && !ep->stopped) 2667 req = list_entry(ep->queue.next, 2668 struct net2280_request, queue); 2669 else 2670 req = NULL; 2671 if (req && !ep->is_in) 2672 stop_out_naking(ep); 2673 } 2674 } 2675 2676 /* is there a buffer for the next packet? 2677 * for best streaming performance, make sure there is one. 2678 */ 2679 if (req && !ep->stopped) { 2680 2681 /* load IN fifo with next packet (may be zlp) */ 2682 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) 2683 write_fifo(ep, &req->req); 2684 } 2685 } 2686 2687 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex) 2688 { 2689 struct net2280_ep *ep; 2690 2691 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 2692 return &dev->ep[0]; 2693 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { 2694 u8 bEndpointAddress; 2695 2696 if (!ep->desc) 2697 continue; 2698 bEndpointAddress = ep->desc->bEndpointAddress; 2699 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 2700 continue; 2701 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) 2702 return ep; 2703 } 2704 return NULL; 2705 } 2706 2707 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r) 2708 { 2709 u32 scratch, fsmvalue; 2710 u32 ack_wait_timeout, state; 2711 2712 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */ 2713 scratch = get_idx_reg(dev->regs, SCRATCH); 2714 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); 2715 scratch &= ~(0xf << DEFECT7374_FSM_FIELD); 2716 2717 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) && 2718 (r.bRequestType & USB_DIR_IN))) 2719 return; 2720 2721 /* This is the first Control Read for this connection: */ 2722 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) { 2723 /* 2724 * Connection is NOT SS: 2725 * - Connection must be FS or HS. 2726 * - This FSM state should allow workaround software to 2727 * run after the next USB connection. 2728 */ 2729 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ; 2730 goto restore_data_eps; 2731 } 2732 2733 /* Connection is SS: */ 2734 for (ack_wait_timeout = 0; 2735 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS; 2736 ack_wait_timeout++) { 2737 2738 state = readl(&dev->plregs->pl_ep_status_1) 2739 & (0xff << STATE); 2740 if ((state >= (ACK_GOOD_NORMAL << STATE)) && 2741 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) { 2742 scratch |= DEFECT7374_FSM_SS_CONTROL_READ; 2743 break; 2744 } 2745 2746 /* 2747 * We have not yet received host's Data Phase ACK 2748 * - Wait and try again. 2749 */ 2750 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME); 2751 2752 continue; 2753 } 2754 2755 2756 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) { 2757 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed " 2758 "to detect SS host's data phase ACK."); 2759 ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16" 2760 "got 0x%2.2x.\n", state >> STATE); 2761 } else { 2762 ep_warn(dev, "INFO: Defect 7374 workaround waited about\n" 2763 "%duSec for Control Read Data Phase ACK\n", 2764 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout); 2765 } 2766 2767 restore_data_eps: 2768 /* 2769 * Restore data EPs to their pre-workaround settings (disabled, 2770 * initialized, and other details). 2771 */ 2772 defect7374_disable_data_eps(dev); 2773 2774 set_idx_reg(dev->regs, SCRATCH, scratch); 2775 2776 return; 2777 } 2778 2779 static void ep_stall(struct net2280_ep *ep, int stall) 2780 { 2781 struct net2280 *dev = ep->dev; 2782 u32 val; 2783 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 }; 2784 2785 if (stall) { 2786 writel(BIT(SET_ENDPOINT_HALT) | 2787 /* BIT(SET_NAK_PACKETS) | */ 2788 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), 2789 &ep->regs->ep_rsp); 2790 ep->is_halt = 1; 2791 } else { 2792 if (dev->gadget.speed == USB_SPEED_SUPER) { 2793 /* 2794 * Workaround for SS SeqNum not cleared via 2795 * Endpoint Halt (Clear) bit. select endpoint 2796 */ 2797 val = readl(&dev->plregs->pl_ep_ctrl); 2798 val = (val & ~0x1f) | ep_pl[ep->num]; 2799 writel(val, &dev->plregs->pl_ep_ctrl); 2800 2801 val |= BIT(SEQUENCE_NUMBER_RESET); 2802 writel(val, &dev->plregs->pl_ep_ctrl); 2803 } 2804 val = readl(&ep->regs->ep_rsp); 2805 val |= BIT(CLEAR_ENDPOINT_HALT) | 2806 BIT(CLEAR_ENDPOINT_TOGGLE); 2807 writel(val, 2808 /* | BIT(CLEAR_NAK_PACKETS),*/ 2809 &ep->regs->ep_rsp); 2810 ep->is_halt = 0; 2811 val = readl(&ep->regs->ep_rsp); 2812 } 2813 } 2814 2815 static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged) 2816 { 2817 /* set/clear, then synch memory views with the device */ 2818 if (value) { 2819 ep->stopped = 1; 2820 if (ep->num == 0) 2821 ep->dev->protocol_stall = 1; 2822 else { 2823 if (ep->dma) 2824 ep_stop_dma(ep); 2825 ep_stall(ep, true); 2826 } 2827 2828 if (wedged) 2829 ep->wedged = 1; 2830 } else { 2831 ep->stopped = 0; 2832 ep->wedged = 0; 2833 2834 ep_stall(ep, false); 2835 2836 /* Flush the queue */ 2837 if (!list_empty(&ep->queue)) { 2838 struct net2280_request *req = 2839 list_entry(ep->queue.next, struct net2280_request, 2840 queue); 2841 if (ep->dma) 2842 resume_dma(ep); 2843 else { 2844 if (ep->is_in) 2845 write_fifo(ep, &req->req); 2846 else { 2847 if (read_fifo(ep, req)) 2848 done(ep, req, 0); 2849 } 2850 } 2851 } 2852 } 2853 } 2854 2855 static void handle_stat0_irqs_superspeed(struct net2280 *dev, 2856 struct net2280_ep *ep, struct usb_ctrlrequest r) 2857 { 2858 int tmp = 0; 2859 2860 #define w_value le16_to_cpu(r.wValue) 2861 #define w_index le16_to_cpu(r.wIndex) 2862 #define w_length le16_to_cpu(r.wLength) 2863 2864 switch (r.bRequest) { 2865 struct net2280_ep *e; 2866 u16 status; 2867 2868 case USB_REQ_SET_CONFIGURATION: 2869 dev->addressed_state = !w_value; 2870 goto usb3_delegate; 2871 2872 case USB_REQ_GET_STATUS: 2873 switch (r.bRequestType) { 2874 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2875 status = dev->wakeup_enable ? 0x02 : 0x00; 2876 if (dev->selfpowered) 2877 status |= BIT(0); 2878 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | 2879 dev->ltm_enable << 4); 2880 writel(0, &dev->epregs[0].ep_irqenb); 2881 set_fifo_bytecount(ep, sizeof(status)); 2882 writel((__force u32) status, &dev->epregs[0].ep_data); 2883 allow_status_338x(ep); 2884 break; 2885 2886 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2887 e = get_ep_by_addr(dev, w_index); 2888 if (!e) 2889 goto do_stall3; 2890 status = readl(&e->regs->ep_rsp) & 2891 BIT(CLEAR_ENDPOINT_HALT); 2892 writel(0, &dev->epregs[0].ep_irqenb); 2893 set_fifo_bytecount(ep, sizeof(status)); 2894 writel((__force u32) status, &dev->epregs[0].ep_data); 2895 allow_status_338x(ep); 2896 break; 2897 2898 default: 2899 goto usb3_delegate; 2900 } 2901 break; 2902 2903 case USB_REQ_CLEAR_FEATURE: 2904 switch (r.bRequestType) { 2905 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2906 if (!dev->addressed_state) { 2907 switch (w_value) { 2908 case USB_DEVICE_U1_ENABLE: 2909 dev->u1_enable = 0; 2910 writel(readl(&dev->usb_ext->usbctl2) & 2911 ~BIT(U1_ENABLE), 2912 &dev->usb_ext->usbctl2); 2913 allow_status_338x(ep); 2914 goto next_endpoints3; 2915 2916 case USB_DEVICE_U2_ENABLE: 2917 dev->u2_enable = 0; 2918 writel(readl(&dev->usb_ext->usbctl2) & 2919 ~BIT(U2_ENABLE), 2920 &dev->usb_ext->usbctl2); 2921 allow_status_338x(ep); 2922 goto next_endpoints3; 2923 2924 case USB_DEVICE_LTM_ENABLE: 2925 dev->ltm_enable = 0; 2926 writel(readl(&dev->usb_ext->usbctl2) & 2927 ~BIT(LTM_ENABLE), 2928 &dev->usb_ext->usbctl2); 2929 allow_status_338x(ep); 2930 goto next_endpoints3; 2931 2932 default: 2933 break; 2934 } 2935 } 2936 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2937 dev->wakeup_enable = 0; 2938 writel(readl(&dev->usb->usbctl) & 2939 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2940 &dev->usb->usbctl); 2941 allow_status_338x(ep); 2942 break; 2943 } 2944 goto usb3_delegate; 2945 2946 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2947 e = get_ep_by_addr(dev, w_index); 2948 if (!e) 2949 goto do_stall3; 2950 if (w_value != USB_ENDPOINT_HALT) 2951 goto do_stall3; 2952 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 2953 ep_stall(e, false); 2954 if (!list_empty(&e->queue) && e->td_dma) 2955 restart_dma(e); 2956 allow_status(ep); 2957 ep->stopped = 1; 2958 break; 2959 2960 default: 2961 goto usb3_delegate; 2962 } 2963 break; 2964 case USB_REQ_SET_FEATURE: 2965 switch (r.bRequestType) { 2966 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2967 if (!dev->addressed_state) { 2968 switch (w_value) { 2969 case USB_DEVICE_U1_ENABLE: 2970 dev->u1_enable = 1; 2971 writel(readl(&dev->usb_ext->usbctl2) | 2972 BIT(U1_ENABLE), 2973 &dev->usb_ext->usbctl2); 2974 allow_status_338x(ep); 2975 goto next_endpoints3; 2976 2977 case USB_DEVICE_U2_ENABLE: 2978 dev->u2_enable = 1; 2979 writel(readl(&dev->usb_ext->usbctl2) | 2980 BIT(U2_ENABLE), 2981 &dev->usb_ext->usbctl2); 2982 allow_status_338x(ep); 2983 goto next_endpoints3; 2984 2985 case USB_DEVICE_LTM_ENABLE: 2986 dev->ltm_enable = 1; 2987 writel(readl(&dev->usb_ext->usbctl2) | 2988 BIT(LTM_ENABLE), 2989 &dev->usb_ext->usbctl2); 2990 allow_status_338x(ep); 2991 goto next_endpoints3; 2992 default: 2993 break; 2994 } 2995 } 2996 2997 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2998 dev->wakeup_enable = 1; 2999 writel(readl(&dev->usb->usbctl) | 3000 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 3001 &dev->usb->usbctl); 3002 allow_status_338x(ep); 3003 break; 3004 } 3005 goto usb3_delegate; 3006 3007 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 3008 e = get_ep_by_addr(dev, w_index); 3009 if (!e || (w_value != USB_ENDPOINT_HALT)) 3010 goto do_stall3; 3011 ep_stdrsp(e, true, false); 3012 allow_status_338x(ep); 3013 break; 3014 3015 default: 3016 goto usb3_delegate; 3017 } 3018 3019 break; 3020 default: 3021 3022 usb3_delegate: 3023 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n", 3024 r.bRequestType, r.bRequest, 3025 w_value, w_index, w_length, 3026 readl(&ep->cfg->ep_cfg)); 3027 3028 ep->responded = 0; 3029 spin_unlock(&dev->lock); 3030 tmp = dev->driver->setup(&dev->gadget, &r); 3031 spin_lock(&dev->lock); 3032 } 3033 do_stall3: 3034 if (tmp < 0) { 3035 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 3036 r.bRequestType, r.bRequest, tmp); 3037 dev->protocol_stall = 1; 3038 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */ 3039 ep_stall(ep, true); 3040 } 3041 3042 next_endpoints3: 3043 3044 #undef w_value 3045 #undef w_index 3046 #undef w_length 3047 3048 return; 3049 } 3050 3051 static void handle_stat0_irqs(struct net2280 *dev, u32 stat) 3052 { 3053 struct net2280_ep *ep; 3054 u32 num, scratch; 3055 3056 /* most of these don't need individual acks */ 3057 stat &= ~BIT(INTA_ASSERTED); 3058 if (!stat) 3059 return; 3060 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */ 3061 3062 /* starting a control request? */ 3063 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) { 3064 union { 3065 u32 raw[2]; 3066 struct usb_ctrlrequest r; 3067 } u; 3068 int tmp; 3069 struct net2280_request *req; 3070 3071 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { 3072 u32 val = readl(&dev->usb->usbstat); 3073 if (val & BIT(SUPER_SPEED)) { 3074 dev->gadget.speed = USB_SPEED_SUPER; 3075 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3076 EP0_SS_MAX_PACKET_SIZE); 3077 } else if (val & BIT(HIGH_SPEED)) { 3078 dev->gadget.speed = USB_SPEED_HIGH; 3079 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3080 EP0_HS_MAX_PACKET_SIZE); 3081 } else { 3082 dev->gadget.speed = USB_SPEED_FULL; 3083 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3084 EP0_HS_MAX_PACKET_SIZE); 3085 } 3086 net2280_led_speed(dev, dev->gadget.speed); 3087 ep_dbg(dev, "%s\n", 3088 usb_speed_string(dev->gadget.speed)); 3089 } 3090 3091 ep = &dev->ep[0]; 3092 ep->irqs++; 3093 3094 /* make sure any leftover request state is cleared */ 3095 stat &= ~BIT(ENDPOINT_0_INTERRUPT); 3096 while (!list_empty(&ep->queue)) { 3097 req = list_entry(ep->queue.next, 3098 struct net2280_request, queue); 3099 done(ep, req, (req->req.actual == req->req.length) 3100 ? 0 : -EPROTO); 3101 } 3102 ep->stopped = 0; 3103 dev->protocol_stall = 0; 3104 if (dev->quirks & PLX_SUPERSPEED) 3105 ep->is_halt = 0; 3106 else{ 3107 if (ep->dev->quirks & PLX_2280) 3108 tmp = BIT(FIFO_OVERFLOW) | 3109 BIT(FIFO_UNDERFLOW); 3110 else 3111 tmp = 0; 3112 3113 writel(tmp | BIT(TIMEOUT) | 3114 BIT(USB_STALL_SENT) | 3115 BIT(USB_IN_NAK_SENT) | 3116 BIT(USB_IN_ACK_RCVD) | 3117 BIT(USB_OUT_PING_NAK_SENT) | 3118 BIT(USB_OUT_ACK_SENT) | 3119 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 3120 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 3121 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 3122 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 3123 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3124 BIT(DATA_IN_TOKEN_INTERRUPT), 3125 &ep->regs->ep_stat); 3126 } 3127 u.raw[0] = readl(&dev->usb->setup0123); 3128 u.raw[1] = readl(&dev->usb->setup4567); 3129 3130 cpu_to_le32s(&u.raw[0]); 3131 cpu_to_le32s(&u.raw[1]); 3132 3133 if (dev->quirks & PLX_SUPERSPEED) 3134 defect7374_workaround(dev, u.r); 3135 3136 tmp = 0; 3137 3138 #define w_value le16_to_cpu(u.r.wValue) 3139 #define w_index le16_to_cpu(u.r.wIndex) 3140 #define w_length le16_to_cpu(u.r.wLength) 3141 3142 /* ack the irq */ 3143 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0); 3144 stat ^= BIT(SETUP_PACKET_INTERRUPT); 3145 3146 /* watch control traffic at the token level, and force 3147 * synchronization before letting the status stage happen. 3148 * FIXME ignore tokens we'll NAK, until driver responds. 3149 * that'll mean a lot less irqs for some drivers. 3150 */ 3151 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; 3152 if (ep->is_in) { 3153 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 3154 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3155 BIT(DATA_IN_TOKEN_INTERRUPT); 3156 stop_out_naking(ep); 3157 } else 3158 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 3159 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3160 BIT(DATA_IN_TOKEN_INTERRUPT); 3161 writel(scratch, &dev->epregs[0].ep_irqenb); 3162 3163 /* we made the hardware handle most lowlevel requests; 3164 * everything else goes uplevel to the gadget code. 3165 */ 3166 ep->responded = 1; 3167 3168 if (dev->gadget.speed == USB_SPEED_SUPER) { 3169 handle_stat0_irqs_superspeed(dev, ep, u.r); 3170 goto next_endpoints; 3171 } 3172 3173 switch (u.r.bRequest) { 3174 case USB_REQ_GET_STATUS: { 3175 struct net2280_ep *e; 3176 __le32 status; 3177 3178 /* hw handles device and interface status */ 3179 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT)) 3180 goto delegate; 3181 e = get_ep_by_addr(dev, w_index); 3182 if (!e || w_length > 2) 3183 goto do_stall; 3184 3185 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT)) 3186 status = cpu_to_le32(1); 3187 else 3188 status = cpu_to_le32(0); 3189 3190 /* don't bother with a request object! */ 3191 writel(0, &dev->epregs[0].ep_irqenb); 3192 set_fifo_bytecount(ep, w_length); 3193 writel((__force u32)status, &dev->epregs[0].ep_data); 3194 allow_status(ep); 3195 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status); 3196 goto next_endpoints; 3197 } 3198 break; 3199 case USB_REQ_CLEAR_FEATURE: { 3200 struct net2280_ep *e; 3201 3202 /* hw handles device features */ 3203 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3204 goto delegate; 3205 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3206 goto do_stall; 3207 e = get_ep_by_addr(dev, w_index); 3208 if (!e) 3209 goto do_stall; 3210 if (e->wedged) { 3211 ep_vdbg(dev, "%s wedged, halt not cleared\n", 3212 ep->ep.name); 3213 } else { 3214 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 3215 clear_halt(e); 3216 if ((ep->dev->quirks & PLX_SUPERSPEED) && 3217 !list_empty(&e->queue) && e->td_dma) 3218 restart_dma(e); 3219 } 3220 allow_status(ep); 3221 goto next_endpoints; 3222 } 3223 break; 3224 case USB_REQ_SET_FEATURE: { 3225 struct net2280_ep *e; 3226 3227 /* hw handles device features */ 3228 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3229 goto delegate; 3230 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3231 goto do_stall; 3232 e = get_ep_by_addr(dev, w_index); 3233 if (!e) 3234 goto do_stall; 3235 if (e->ep.name == ep0name) 3236 goto do_stall; 3237 set_halt(e); 3238 if ((dev->quirks & PLX_SUPERSPEED) && e->dma) 3239 abort_dma(e); 3240 allow_status(ep); 3241 ep_vdbg(dev, "%s set halt\n", ep->ep.name); 3242 goto next_endpoints; 3243 } 3244 break; 3245 default: 3246 delegate: 3247 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x " 3248 "ep_cfg %08x\n", 3249 u.r.bRequestType, u.r.bRequest, 3250 w_value, w_index, w_length, 3251 readl(&ep->cfg->ep_cfg)); 3252 ep->responded = 0; 3253 spin_unlock(&dev->lock); 3254 tmp = dev->driver->setup(&dev->gadget, &u.r); 3255 spin_lock(&dev->lock); 3256 } 3257 3258 /* stall ep0 on error */ 3259 if (tmp < 0) { 3260 do_stall: 3261 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 3262 u.r.bRequestType, u.r.bRequest, tmp); 3263 dev->protocol_stall = 1; 3264 } 3265 3266 /* some in/out token irq should follow; maybe stall then. 3267 * driver must queue a request (even zlp) or halt ep0 3268 * before the host times out. 3269 */ 3270 } 3271 3272 #undef w_value 3273 #undef w_index 3274 #undef w_length 3275 3276 next_endpoints: 3277 /* endpoint data irq ? */ 3278 scratch = stat & 0x7f; 3279 stat &= ~0x7f; 3280 for (num = 0; scratch; num++) { 3281 u32 t; 3282 3283 /* do this endpoint's FIFO and queue need tending? */ 3284 t = BIT(num); 3285 if ((scratch & t) == 0) 3286 continue; 3287 scratch ^= t; 3288 3289 ep = &dev->ep[num]; 3290 handle_ep_small(ep); 3291 } 3292 3293 if (stat) 3294 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat); 3295 } 3296 3297 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \ 3298 BIT(DMA_C_INTERRUPT) | \ 3299 BIT(DMA_B_INTERRUPT) | \ 3300 BIT(DMA_A_INTERRUPT)) 3301 #define PCI_ERROR_INTERRUPTS ( \ 3302 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \ 3303 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \ 3304 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3305 3306 static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3307 { 3308 struct net2280_ep *ep; 3309 u32 tmp, num, mask, scratch; 3310 3311 /* after disconnect there's nothing else to do! */ 3312 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT); 3313 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED); 3314 3315 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. 3316 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and 3317 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT 3318 * only indicates a change in the reset state). 3319 */ 3320 if (stat & tmp) { 3321 writel(tmp, &dev->regs->irqstat1); 3322 if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && 3323 ((readl(&dev->usb->usbstat) & mask) == 0)) || 3324 ((readl(&dev->usb->usbctl) & 3325 BIT(VBUS_PIN)) == 0)) && 3326 (dev->gadget.speed != USB_SPEED_UNKNOWN)) { 3327 ep_dbg(dev, "disconnect %s\n", 3328 dev->driver->driver.name); 3329 stop_activity(dev, dev->driver); 3330 ep0_start(dev); 3331 return; 3332 } 3333 stat &= ~tmp; 3334 3335 /* vBUS can bounce ... one of many reasons to ignore the 3336 * notion of hotplug events on bus connect/disconnect! 3337 */ 3338 if (!stat) 3339 return; 3340 } 3341 3342 /* NOTE: chip stays in PCI D0 state for now, but it could 3343 * enter D1 to save more power 3344 */ 3345 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3346 if (stat & tmp) { 3347 writel(tmp, &dev->regs->irqstat1); 3348 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3349 if (dev->driver->suspend) 3350 dev->driver->suspend(&dev->gadget); 3351 if (!enable_suspend) 3352 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT); 3353 } else { 3354 if (dev->driver->resume) 3355 dev->driver->resume(&dev->gadget); 3356 /* at high speed, note erratum 0133 */ 3357 } 3358 stat &= ~tmp; 3359 } 3360 3361 /* clear any other status/irqs */ 3362 if (stat) 3363 writel(stat, &dev->regs->irqstat1); 3364 3365 /* some status we can just ignore */ 3366 if (dev->quirks & PLX_2280) 3367 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3368 BIT(SUSPEND_REQUEST_INTERRUPT) | 3369 BIT(RESUME_INTERRUPT) | 3370 BIT(SOF_INTERRUPT)); 3371 else 3372 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3373 BIT(RESUME_INTERRUPT) | 3374 BIT(SOF_DOWN_INTERRUPT) | 3375 BIT(SOF_INTERRUPT)); 3376 3377 if (!stat) 3378 return; 3379 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/ 3380 3381 /* DMA status, for ep-{a,b,c,d} */ 3382 scratch = stat & DMA_INTERRUPTS; 3383 stat &= ~DMA_INTERRUPTS; 3384 scratch >>= 9; 3385 for (num = 0; scratch; num++) { 3386 struct net2280_dma_regs __iomem *dma; 3387 3388 tmp = BIT(num); 3389 if ((tmp & scratch) == 0) 3390 continue; 3391 scratch ^= tmp; 3392 3393 ep = &dev->ep[num + 1]; 3394 dma = ep->dma; 3395 3396 if (!dma) 3397 continue; 3398 3399 /* clear ep's dma status */ 3400 tmp = readl(&dma->dmastat); 3401 writel(tmp, &dma->dmastat); 3402 3403 /* dma sync*/ 3404 if (dev->quirks & PLX_SUPERSPEED) { 3405 u32 r_dmacount = readl(&dma->dmacount); 3406 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && 3407 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) 3408 continue; 3409 } 3410 3411 /* chaining should stop on abort, short OUT from fifo, 3412 * or (stat0 codepath) short OUT transfer. 3413 */ 3414 if (!use_dma_chaining) { 3415 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) { 3416 ep_dbg(ep->dev, "%s no xact done? %08x\n", 3417 ep->ep.name, tmp); 3418 continue; 3419 } 3420 stop_dma(ep->dma); 3421 } 3422 3423 /* OUT transfers terminate when the data from the 3424 * host is in our memory. Process whatever's done. 3425 * On this path, we know transfer's last packet wasn't 3426 * less than req->length. NAK_OUT_PACKETS may be set, 3427 * or the FIFO may already be holding new packets. 3428 * 3429 * IN transfers can linger in the FIFO for a very 3430 * long time ... we ignore that for now, accounting 3431 * precisely (like PIO does) needs per-packet irqs 3432 */ 3433 scan_dma_completions(ep); 3434 3435 /* disable dma on inactive queues; else maybe restart */ 3436 if (list_empty(&ep->queue)) { 3437 if (use_dma_chaining) 3438 stop_dma(ep->dma); 3439 } else { 3440 tmp = readl(&dma->dmactl); 3441 if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0) 3442 restart_dma(ep); 3443 else if (ep->is_in && use_dma_chaining) { 3444 struct net2280_request *req; 3445 __le32 dmacount; 3446 3447 /* the descriptor at the head of the chain 3448 * may still have VALID_BIT clear; that's 3449 * used to trigger changing DMA_FIFO_VALIDATE 3450 * (affects automagic zlp writes). 3451 */ 3452 req = list_entry(ep->queue.next, 3453 struct net2280_request, queue); 3454 dmacount = req->td->dmacount; 3455 dmacount &= cpu_to_le32(BIT(VALID_BIT) | 3456 DMA_BYTE_COUNT_MASK); 3457 if (dmacount && (dmacount & valid_bit) == 0) 3458 restart_dma(ep); 3459 } 3460 } 3461 ep->irqs++; 3462 } 3463 3464 /* NOTE: there are other PCI errors we might usefully notice. 3465 * if they appear very often, here's where to try recovering. 3466 */ 3467 if (stat & PCI_ERROR_INTERRUPTS) { 3468 ep_err(dev, "pci dma error; stat %08x\n", stat); 3469 stat &= ~PCI_ERROR_INTERRUPTS; 3470 /* these are fatal errors, but "maybe" they won't 3471 * happen again ... 3472 */ 3473 stop_activity(dev, dev->driver); 3474 ep0_start(dev); 3475 stat = 0; 3476 } 3477 3478 if (stat) 3479 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat); 3480 } 3481 3482 static irqreturn_t net2280_irq(int irq, void *_dev) 3483 { 3484 struct net2280 *dev = _dev; 3485 3486 /* shared interrupt, not ours */ 3487 if ((dev->quirks & PLX_LEGACY) && 3488 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED)))) 3489 return IRQ_NONE; 3490 3491 spin_lock(&dev->lock); 3492 3493 /* handle disconnect, dma, and more */ 3494 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1)); 3495 3496 /* control requests and PIO */ 3497 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0)); 3498 3499 if (dev->quirks & PLX_SUPERSPEED) { 3500 /* re-enable interrupt to trigger any possible new interrupt */ 3501 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); 3502 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); 3503 writel(pciirqenb1, &dev->regs->pciirqenb1); 3504 } 3505 3506 spin_unlock(&dev->lock); 3507 3508 return IRQ_HANDLED; 3509 } 3510 3511 /*-------------------------------------------------------------------------*/ 3512 3513 static void gadget_release(struct device *_dev) 3514 { 3515 struct net2280 *dev = dev_get_drvdata(_dev); 3516 3517 kfree(dev); 3518 } 3519 3520 /* tear down the binding between this driver and the pci device */ 3521 3522 static void net2280_remove(struct pci_dev *pdev) 3523 { 3524 struct net2280 *dev = pci_get_drvdata(pdev); 3525 3526 usb_del_gadget_udc(&dev->gadget); 3527 3528 BUG_ON(dev->driver); 3529 3530 /* then clean up the resources we allocated during probe() */ 3531 net2280_led_shutdown(dev); 3532 if (dev->requests) { 3533 int i; 3534 for (i = 1; i < 5; i++) { 3535 if (!dev->ep[i].dummy) 3536 continue; 3537 pci_pool_free(dev->requests, dev->ep[i].dummy, 3538 dev->ep[i].td_dma); 3539 } 3540 pci_pool_destroy(dev->requests); 3541 } 3542 if (dev->got_irq) 3543 free_irq(pdev->irq, dev); 3544 if (use_msi && dev->quirks & PLX_SUPERSPEED) 3545 pci_disable_msi(pdev); 3546 if (dev->regs) 3547 iounmap(dev->regs); 3548 if (dev->region) 3549 release_mem_region(pci_resource_start(pdev, 0), 3550 pci_resource_len(pdev, 0)); 3551 if (dev->enabled) 3552 pci_disable_device(pdev); 3553 device_remove_file(&pdev->dev, &dev_attr_registers); 3554 3555 ep_info(dev, "unbind\n"); 3556 } 3557 3558 /* wrap this driver around the specified device, but 3559 * don't respond over USB until a gadget driver binds to us. 3560 */ 3561 3562 static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3563 { 3564 struct net2280 *dev; 3565 unsigned long resource, len; 3566 void __iomem *base = NULL; 3567 int retval, i; 3568 3569 if (!use_dma) 3570 use_dma_chaining = 0; 3571 3572 /* alloc, and start init */ 3573 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3574 if (dev == NULL) { 3575 retval = -ENOMEM; 3576 goto done; 3577 } 3578 3579 pci_set_drvdata(pdev, dev); 3580 spin_lock_init(&dev->lock); 3581 dev->quirks = id->driver_data; 3582 dev->pdev = pdev; 3583 dev->gadget.ops = &net2280_ops; 3584 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ? 3585 USB_SPEED_SUPER : USB_SPEED_HIGH; 3586 3587 /* the "gadget" abstracts/virtualizes the controller */ 3588 dev->gadget.name = driver_name; 3589 3590 /* now all the pci goodies ... */ 3591 if (pci_enable_device(pdev) < 0) { 3592 retval = -ENODEV; 3593 goto done; 3594 } 3595 dev->enabled = 1; 3596 3597 /* BAR 0 holds all the registers 3598 * BAR 1 is 8051 memory; unused here (note erratum 0103) 3599 * BAR 2 is fifo memory; unused here 3600 */ 3601 resource = pci_resource_start(pdev, 0); 3602 len = pci_resource_len(pdev, 0); 3603 if (!request_mem_region(resource, len, driver_name)) { 3604 ep_dbg(dev, "controller already in use\n"); 3605 retval = -EBUSY; 3606 goto done; 3607 } 3608 dev->region = 1; 3609 3610 /* FIXME provide firmware download interface to put 3611 * 8051 code into the chip, e.g. to turn on PCI PM. 3612 */ 3613 3614 base = ioremap_nocache(resource, len); 3615 if (base == NULL) { 3616 ep_dbg(dev, "can't map memory\n"); 3617 retval = -EFAULT; 3618 goto done; 3619 } 3620 dev->regs = (struct net2280_regs __iomem *) base; 3621 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080); 3622 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100); 3623 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180); 3624 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); 3625 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); 3626 3627 if (dev->quirks & PLX_SUPERSPEED) { 3628 u32 fsmvalue; 3629 u32 usbstat; 3630 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *) 3631 (base + 0x00b4); 3632 dev->fiforegs = (struct usb338x_fifo_regs __iomem *) 3633 (base + 0x0500); 3634 dev->llregs = (struct usb338x_ll_regs __iomem *) 3635 (base + 0x0700); 3636 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *) 3637 (base + 0x0748); 3638 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *) 3639 (base + 0x077c); 3640 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *) 3641 (base + 0x079c); 3642 dev->plregs = (struct usb338x_pl_regs __iomem *) 3643 (base + 0x0800); 3644 usbstat = readl(&dev->usb->usbstat); 3645 dev->enhanced_mode = !!(usbstat & BIT(11)); 3646 dev->n_ep = (dev->enhanced_mode) ? 9 : 5; 3647 /* put into initial config, link up all endpoints */ 3648 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 3649 (0xf << DEFECT7374_FSM_FIELD); 3650 /* See if firmware needs to set up for workaround: */ 3651 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) 3652 writel(0, &dev->usb->usbctl); 3653 } else{ 3654 dev->enhanced_mode = 0; 3655 dev->n_ep = 7; 3656 /* put into initial config, link up all endpoints */ 3657 writel(0, &dev->usb->usbctl); 3658 } 3659 3660 usb_reset(dev); 3661 usb_reinit(dev); 3662 3663 /* irq setup after old hardware is cleaned up */ 3664 if (!pdev->irq) { 3665 ep_err(dev, "No IRQ. Check PCI setup!\n"); 3666 retval = -ENODEV; 3667 goto done; 3668 } 3669 3670 if (use_msi && (dev->quirks & PLX_SUPERSPEED)) 3671 if (pci_enable_msi(pdev)) 3672 ep_err(dev, "Failed to enable MSI mode\n"); 3673 3674 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED, 3675 driver_name, dev)) { 3676 ep_err(dev, "request interrupt %d failed\n", pdev->irq); 3677 retval = -EBUSY; 3678 goto done; 3679 } 3680 dev->got_irq = 1; 3681 3682 /* DMA setup */ 3683 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */ 3684 dev->requests = pci_pool_create("requests", pdev, 3685 sizeof(struct net2280_dma), 3686 0 /* no alignment requirements */, 3687 0 /* or page-crossing issues */); 3688 if (!dev->requests) { 3689 ep_dbg(dev, "can't get request pool\n"); 3690 retval = -ENOMEM; 3691 goto done; 3692 } 3693 for (i = 1; i < 5; i++) { 3694 struct net2280_dma *td; 3695 3696 td = pci_pool_alloc(dev->requests, GFP_KERNEL, 3697 &dev->ep[i].td_dma); 3698 if (!td) { 3699 ep_dbg(dev, "can't get dummy %d\n", i); 3700 retval = -ENOMEM; 3701 goto done; 3702 } 3703 td->dmacount = 0; /* not VALID */ 3704 td->dmadesc = td->dmaaddr; 3705 dev->ep[i].dummy = td; 3706 } 3707 3708 /* enable lower-overhead pci memory bursts during DMA */ 3709 if (dev->quirks & PLX_LEGACY) 3710 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) | 3711 /* 3712 * 256 write retries may not be enough... 3713 BIT(PCI_RETRY_ABORT_ENABLE) | 3714 */ 3715 BIT(DMA_READ_MULTIPLE_ENABLE) | 3716 BIT(DMA_READ_LINE_ENABLE), 3717 &dev->pci->pcimstctl); 3718 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */ 3719 pci_set_master(pdev); 3720 pci_try_set_mwi(pdev); 3721 3722 /* ... also flushes any posted pci writes */ 3723 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff; 3724 3725 /* done */ 3726 ep_info(dev, "%s\n", driver_desc); 3727 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n", 3728 pdev->irq, base, dev->chiprev); 3729 ep_info(dev, "version: " DRIVER_VERSION "; dma %s %s\n", 3730 use_dma ? (use_dma_chaining ? "chaining" : "enabled") 3731 : "disabled", 3732 dev->enhanced_mode ? "enhanced mode" : "legacy mode"); 3733 retval = device_create_file(&pdev->dev, &dev_attr_registers); 3734 if (retval) 3735 goto done; 3736 3737 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 3738 gadget_release); 3739 if (retval) 3740 goto done; 3741 return 0; 3742 3743 done: 3744 if (dev) 3745 net2280_remove(pdev); 3746 return retval; 3747 } 3748 3749 /* make sure the board is quiescent; otherwise it will continue 3750 * generating IRQs across the upcoming reboot. 3751 */ 3752 3753 static void net2280_shutdown(struct pci_dev *pdev) 3754 { 3755 struct net2280 *dev = pci_get_drvdata(pdev); 3756 3757 /* disable IRQs */ 3758 writel(0, &dev->regs->pciirqenb0); 3759 writel(0, &dev->regs->pciirqenb1); 3760 3761 /* disable the pullup so the host will think we're gone */ 3762 writel(0, &dev->usb->usbctl); 3763 3764 /* Disable full-speed test mode */ 3765 if (dev->quirks & PLX_LEGACY) 3766 writel(0, &dev->usb->xcvrdiag); 3767 } 3768 3769 3770 /*-------------------------------------------------------------------------*/ 3771 3772 static const struct pci_device_id pci_ids[] = { { 3773 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3774 .class_mask = ~0, 3775 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3776 .device = 0x2280, 3777 .subvendor = PCI_ANY_ID, 3778 .subdevice = PCI_ANY_ID, 3779 .driver_data = PLX_LEGACY | PLX_2280, 3780 }, { 3781 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3782 .class_mask = ~0, 3783 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3784 .device = 0x2282, 3785 .subvendor = PCI_ANY_ID, 3786 .subdevice = PCI_ANY_ID, 3787 .driver_data = PLX_LEGACY, 3788 }, 3789 { 3790 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3791 .class_mask = ~0, 3792 .vendor = PCI_VENDOR_ID_PLX, 3793 .device = 0x3380, 3794 .subvendor = PCI_ANY_ID, 3795 .subdevice = PCI_ANY_ID, 3796 .driver_data = PLX_SUPERSPEED, 3797 }, 3798 { 3799 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3800 .class_mask = ~0, 3801 .vendor = PCI_VENDOR_ID_PLX, 3802 .device = 0x3382, 3803 .subvendor = PCI_ANY_ID, 3804 .subdevice = PCI_ANY_ID, 3805 .driver_data = PLX_SUPERSPEED, 3806 }, 3807 { /* end: all zeroes */ } 3808 }; 3809 MODULE_DEVICE_TABLE(pci, pci_ids); 3810 3811 /* pci driver glue; this is a "new style" PCI driver module */ 3812 static struct pci_driver net2280_pci_driver = { 3813 .name = (char *) driver_name, 3814 .id_table = pci_ids, 3815 3816 .probe = net2280_probe, 3817 .remove = net2280_remove, 3818 .shutdown = net2280_shutdown, 3819 3820 /* FIXME add power management support */ 3821 }; 3822 3823 module_pci_driver(net2280_pci_driver); 3824 3825 MODULE_DESCRIPTION(DRIVER_DESC); 3826 MODULE_AUTHOR("David Brownell"); 3827 MODULE_LICENSE("GPL"); 3828