1 /* 2 * Driver for the PLX NET2280 USB device controller. 3 * Specs and errata are available from <http://www.plxtech.com>. 4 * 5 * PLX Technology Inc. (formerly NetChip Technology) supported the 6 * development of this driver. 7 * 8 * 9 * CODE STATUS HIGHLIGHTS 10 * 11 * This driver should work well with most "gadget" drivers, including 12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers 13 * as well as Gadget Zero and Gadgetfs. 14 * 15 * DMA is enabled by default. 16 * 17 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't 18 * be enabled. 19 * 20 * Note that almost all the errata workarounds here are only needed for 21 * rev1 chips. Rev1a silicon (0110) fixes almost all of them. 22 */ 23 24 /* 25 * Copyright (C) 2003 David Brownell 26 * Copyright (C) 2003-2005 PLX Technology, Inc. 27 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS 28 * 29 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility 30 * with 2282 chip 31 * 32 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility 33 * with usb 338x chip. Based on PLX driver 34 * 35 * This program is free software; you can redistribute it and/or modify 36 * it under the terms of the GNU General Public License as published by 37 * the Free Software Foundation; either version 2 of the License, or 38 * (at your option) any later version. 39 */ 40 41 #include <linux/module.h> 42 #include <linux/pci.h> 43 #include <linux/dma-mapping.h> 44 #include <linux/kernel.h> 45 #include <linux/delay.h> 46 #include <linux/ioport.h> 47 #include <linux/slab.h> 48 #include <linux/errno.h> 49 #include <linux/init.h> 50 #include <linux/timer.h> 51 #include <linux/list.h> 52 #include <linux/interrupt.h> 53 #include <linux/moduleparam.h> 54 #include <linux/device.h> 55 #include <linux/usb/ch9.h> 56 #include <linux/usb/gadget.h> 57 #include <linux/prefetch.h> 58 #include <linux/io.h> 59 60 #include <asm/byteorder.h> 61 #include <asm/irq.h> 62 #include <asm/unaligned.h> 63 64 #define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller" 65 #define DRIVER_VERSION "2005 Sept 27/v3.0" 66 67 #define EP_DONTUSE 13 /* nonzero */ 68 69 #define USE_RDK_LEDS /* GPIO pins control three LEDs */ 70 71 72 static const char driver_name[] = "net2280"; 73 static const char driver_desc[] = DRIVER_DESC; 74 75 static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 }; 76 static const char ep0name[] = "ep0"; 77 static const char *const ep_name[] = { 78 ep0name, 79 "ep-a", "ep-b", "ep-c", "ep-d", 80 "ep-e", "ep-f", "ep-g", "ep-h", 81 }; 82 83 /* Endpoint names for usb3380 advance mode */ 84 static const char *const ep_name_adv[] = { 85 ep0name, 86 "ep1in", "ep2out", "ep3in", "ep4out", 87 "ep1out", "ep2in", "ep3out", "ep4in", 88 }; 89 90 /* mode 0 == ep-{a,b,c,d} 1K fifo each 91 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable 92 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable 93 */ 94 static ushort fifo_mode; 95 96 /* "modprobe net2280 fifo_mode=1" etc */ 97 module_param(fifo_mode, ushort, 0644); 98 99 /* enable_suspend -- When enabled, the driver will respond to 100 * USB suspend requests by powering down the NET2280. Otherwise, 101 * USB suspend requests will be ignored. This is acceptable for 102 * self-powered devices 103 */ 104 static bool enable_suspend; 105 106 /* "modprobe net2280 enable_suspend=1" etc */ 107 module_param(enable_suspend, bool, 0444); 108 109 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out") 110 111 static char *type_string(u8 bmAttributes) 112 { 113 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 114 case USB_ENDPOINT_XFER_BULK: return "bulk"; 115 case USB_ENDPOINT_XFER_ISOC: return "iso"; 116 case USB_ENDPOINT_XFER_INT: return "intr"; 117 } 118 return "control"; 119 } 120 121 #include "net2280.h" 122 123 #define valid_bit cpu_to_le32(BIT(VALID_BIT)) 124 #define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE)) 125 126 static void ep_clear_seqnum(struct net2280_ep *ep); 127 static void stop_activity(struct net2280 *dev, 128 struct usb_gadget_driver *driver); 129 static void ep0_start(struct net2280 *dev); 130 131 /*-------------------------------------------------------------------------*/ 132 static inline void enable_pciirqenb(struct net2280_ep *ep) 133 { 134 u32 tmp = readl(&ep->dev->regs->pciirqenb0); 135 136 if (ep->dev->quirks & PLX_LEGACY) 137 tmp |= BIT(ep->num); 138 else 139 tmp |= BIT(ep_bit[ep->num]); 140 writel(tmp, &ep->dev->regs->pciirqenb0); 141 142 return; 143 } 144 145 static int 146 net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 147 { 148 struct net2280 *dev; 149 struct net2280_ep *ep; 150 u32 max; 151 u32 tmp = 0; 152 u32 type; 153 unsigned long flags; 154 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 }; 155 int ret = 0; 156 157 ep = container_of(_ep, struct net2280_ep, ep); 158 if (!_ep || !desc || ep->desc || _ep->name == ep0name || 159 desc->bDescriptorType != USB_DT_ENDPOINT) { 160 pr_err("%s: failed at line=%d\n", __func__, __LINE__); 161 return -EINVAL; 162 } 163 dev = ep->dev; 164 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { 165 ret = -ESHUTDOWN; 166 goto print_err; 167 } 168 169 /* erratum 0119 workaround ties up an endpoint number */ 170 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) { 171 ret = -EDOM; 172 goto print_err; 173 } 174 175 if (dev->quirks & PLX_SUPERSPEED) { 176 if ((desc->bEndpointAddress & 0x0f) >= 0x0c) { 177 ret = -EDOM; 178 goto print_err; 179 } 180 ep->is_in = !!usb_endpoint_dir_in(desc); 181 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) { 182 ret = -EINVAL; 183 goto print_err; 184 } 185 } 186 187 /* sanity check ep-e/ep-f since their fifos are small */ 188 max = usb_endpoint_maxp(desc) & 0x1fff; 189 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) { 190 ret = -ERANGE; 191 goto print_err; 192 } 193 194 spin_lock_irqsave(&dev->lock, flags); 195 _ep->maxpacket = max & 0x7ff; 196 ep->desc = desc; 197 198 /* ep_reset() has already been called */ 199 ep->stopped = 0; 200 ep->wedged = 0; 201 ep->out_overflow = 0; 202 203 /* set speed-dependent max packet; may kick in high bandwidth */ 204 set_max_speed(ep, max); 205 206 /* set type, direction, address; reset fifo counters */ 207 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 208 209 if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) { 210 tmp = readl(&ep->cfg->ep_cfg); 211 /* If USB ep number doesn't match hardware ep number */ 212 if ((tmp & 0xf) != usb_endpoint_num(desc)) { 213 ret = -EINVAL; 214 spin_unlock_irqrestore(&dev->lock, flags); 215 goto print_err; 216 } 217 if (ep->is_in) 218 tmp &= ~USB3380_EP_CFG_MASK_IN; 219 else 220 tmp &= ~USB3380_EP_CFG_MASK_OUT; 221 } 222 type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); 223 if (type == USB_ENDPOINT_XFER_INT) { 224 /* erratum 0105 workaround prevents hs NYET */ 225 if (dev->chiprev == 0100 && 226 dev->gadget.speed == USB_SPEED_HIGH && 227 !(desc->bEndpointAddress & USB_DIR_IN)) 228 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE), 229 &ep->regs->ep_rsp); 230 } else if (type == USB_ENDPOINT_XFER_BULK) { 231 /* catch some particularly blatant driver bugs */ 232 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) || 233 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) || 234 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { 235 spin_unlock_irqrestore(&dev->lock, flags); 236 ret = -ERANGE; 237 goto print_err; 238 } 239 } 240 ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC); 241 /* Enable this endpoint */ 242 if (dev->quirks & PLX_LEGACY) { 243 tmp |= type << ENDPOINT_TYPE; 244 tmp |= desc->bEndpointAddress; 245 /* default full fifo lines */ 246 tmp |= (4 << ENDPOINT_BYTE_COUNT); 247 tmp |= BIT(ENDPOINT_ENABLE); 248 ep->is_in = (tmp & USB_DIR_IN) != 0; 249 } else { 250 /* In Legacy mode, only OUT endpoints are used */ 251 if (dev->enhanced_mode && ep->is_in) { 252 tmp |= type << IN_ENDPOINT_TYPE; 253 tmp |= BIT(IN_ENDPOINT_ENABLE); 254 } else { 255 tmp |= type << OUT_ENDPOINT_TYPE; 256 tmp |= BIT(OUT_ENDPOINT_ENABLE); 257 tmp |= (ep->is_in << ENDPOINT_DIRECTION); 258 } 259 260 tmp |= (4 << ENDPOINT_BYTE_COUNT); 261 if (!dev->enhanced_mode) 262 tmp |= usb_endpoint_num(desc); 263 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE); 264 } 265 266 /* Make sure all the registers are written before ep_rsp*/ 267 wmb(); 268 269 /* for OUT transfers, block the rx fifo until a read is posted */ 270 if (!ep->is_in) 271 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 272 else if (!(dev->quirks & PLX_2280)) { 273 /* Added for 2282, Don't use nak packets on an in endpoint, 274 * this was ignored on 2280 275 */ 276 writel(BIT(CLEAR_NAK_OUT_PACKETS) | 277 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); 278 } 279 280 if (dev->quirks & PLX_SUPERSPEED) 281 ep_clear_seqnum(ep); 282 writel(tmp, &ep->cfg->ep_cfg); 283 284 /* enable irqs */ 285 if (!ep->dma) { /* pio, per-packet */ 286 enable_pciirqenb(ep); 287 288 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) | 289 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE); 290 if (dev->quirks & PLX_2280) 291 tmp |= readl(&ep->regs->ep_irqenb); 292 writel(tmp, &ep->regs->ep_irqenb); 293 } else { /* dma, per-request */ 294 tmp = BIT((8 + ep->num)); /* completion */ 295 tmp |= readl(&dev->regs->pciirqenb1); 296 writel(tmp, &dev->regs->pciirqenb1); 297 298 /* for short OUT transfers, dma completions can't 299 * advance the queue; do it pio-style, by hand. 300 * NOTE erratum 0112 workaround #2 301 */ 302 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) { 303 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE); 304 writel(tmp, &ep->regs->ep_irqenb); 305 306 enable_pciirqenb(ep); 307 } 308 } 309 310 tmp = desc->bEndpointAddress; 311 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n", 312 _ep->name, tmp & 0x0f, DIR_STRING(tmp), 313 type_string(desc->bmAttributes), 314 ep->dma ? "dma" : "pio", max); 315 316 /* pci writes may still be posted */ 317 spin_unlock_irqrestore(&dev->lock, flags); 318 return ret; 319 320 print_err: 321 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret); 322 return ret; 323 } 324 325 static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec) 326 { 327 u32 result; 328 329 do { 330 result = readl(ptr); 331 if (result == ~(u32)0) /* "device unplugged" */ 332 return -ENODEV; 333 result &= mask; 334 if (result == done) 335 return 0; 336 udelay(1); 337 usec--; 338 } while (usec > 0); 339 return -ETIMEDOUT; 340 } 341 342 static const struct usb_ep_ops net2280_ep_ops; 343 344 static void ep_reset_228x(struct net2280_regs __iomem *regs, 345 struct net2280_ep *ep) 346 { 347 u32 tmp; 348 349 ep->desc = NULL; 350 INIT_LIST_HEAD(&ep->queue); 351 352 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 353 ep->ep.ops = &net2280_ep_ops; 354 355 /* disable the dma, irqs, endpoint... */ 356 if (ep->dma) { 357 writel(0, &ep->dma->dmactl); 358 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 359 BIT(DMA_TRANSACTION_DONE_INTERRUPT) | 360 BIT(DMA_ABORT), 361 &ep->dma->dmastat); 362 363 tmp = readl(®s->pciirqenb0); 364 tmp &= ~BIT(ep->num); 365 writel(tmp, ®s->pciirqenb0); 366 } else { 367 tmp = readl(®s->pciirqenb1); 368 tmp &= ~BIT((8 + ep->num)); /* completion */ 369 writel(tmp, ®s->pciirqenb1); 370 } 371 writel(0, &ep->regs->ep_irqenb); 372 373 /* init to our chosen defaults, notably so that we NAK OUT 374 * packets until the driver queues a read (+note erratum 0112) 375 */ 376 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) { 377 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) | 378 BIT(SET_NAK_OUT_PACKETS) | 379 BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 380 BIT(CLEAR_INTERRUPT_MODE); 381 } else { 382 /* added for 2282 */ 383 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) | 384 BIT(CLEAR_NAK_OUT_PACKETS) | 385 BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 386 BIT(CLEAR_INTERRUPT_MODE); 387 } 388 389 if (ep->num != 0) { 390 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) | 391 BIT(CLEAR_ENDPOINT_HALT); 392 } 393 writel(tmp, &ep->regs->ep_rsp); 394 395 /* scrub most status bits, and flush any fifo state */ 396 if (ep->dev->quirks & PLX_2280) 397 tmp = BIT(FIFO_OVERFLOW) | 398 BIT(FIFO_UNDERFLOW); 399 else 400 tmp = 0; 401 402 writel(tmp | BIT(TIMEOUT) | 403 BIT(USB_STALL_SENT) | 404 BIT(USB_IN_NAK_SENT) | 405 BIT(USB_IN_ACK_RCVD) | 406 BIT(USB_OUT_PING_NAK_SENT) | 407 BIT(USB_OUT_ACK_SENT) | 408 BIT(FIFO_FLUSH) | 409 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 410 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 411 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 412 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 413 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 414 BIT(DATA_IN_TOKEN_INTERRUPT), 415 &ep->regs->ep_stat); 416 417 /* fifo size is handled separately */ 418 } 419 420 static void ep_reset_338x(struct net2280_regs __iomem *regs, 421 struct net2280_ep *ep) 422 { 423 u32 tmp, dmastat; 424 425 ep->desc = NULL; 426 INIT_LIST_HEAD(&ep->queue); 427 428 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 429 ep->ep.ops = &net2280_ep_ops; 430 431 /* disable the dma, irqs, endpoint... */ 432 if (ep->dma) { 433 writel(0, &ep->dma->dmactl); 434 writel(BIT(DMA_ABORT_DONE_INTERRUPT) | 435 BIT(DMA_PAUSE_DONE_INTERRUPT) | 436 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 437 BIT(DMA_TRANSACTION_DONE_INTERRUPT), 438 /* | BIT(DMA_ABORT), */ 439 &ep->dma->dmastat); 440 441 dmastat = readl(&ep->dma->dmastat); 442 if (dmastat == 0x5002) { 443 ep_warn(ep->dev, "The dmastat return = %x!!\n", 444 dmastat); 445 writel(0x5a, &ep->dma->dmastat); 446 } 447 448 tmp = readl(®s->pciirqenb0); 449 tmp &= ~BIT(ep_bit[ep->num]); 450 writel(tmp, ®s->pciirqenb0); 451 } else { 452 if (ep->num < 5) { 453 tmp = readl(®s->pciirqenb1); 454 tmp &= ~BIT((8 + ep->num)); /* completion */ 455 writel(tmp, ®s->pciirqenb1); 456 } 457 } 458 writel(0, &ep->regs->ep_irqenb); 459 460 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 461 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 462 BIT(FIFO_OVERFLOW) | 463 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 464 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 465 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 466 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); 467 468 tmp = readl(&ep->cfg->ep_cfg); 469 if (ep->is_in) 470 tmp &= ~USB3380_EP_CFG_MASK_IN; 471 else 472 tmp &= ~USB3380_EP_CFG_MASK_OUT; 473 writel(tmp, &ep->cfg->ep_cfg); 474 } 475 476 static void nuke(struct net2280_ep *); 477 478 static int net2280_disable(struct usb_ep *_ep) 479 { 480 struct net2280_ep *ep; 481 unsigned long flags; 482 483 ep = container_of(_ep, struct net2280_ep, ep); 484 if (!_ep || !ep->desc || _ep->name == ep0name) { 485 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 486 return -EINVAL; 487 } 488 spin_lock_irqsave(&ep->dev->lock, flags); 489 nuke(ep); 490 491 if (ep->dev->quirks & PLX_SUPERSPEED) 492 ep_reset_338x(ep->dev->regs, ep); 493 else 494 ep_reset_228x(ep->dev->regs, ep); 495 496 ep_vdbg(ep->dev, "disabled %s %s\n", 497 ep->dma ? "dma" : "pio", _ep->name); 498 499 /* synch memory views with the device */ 500 (void)readl(&ep->cfg->ep_cfg); 501 502 if (!ep->dma && ep->num >= 1 && ep->num <= 4) 503 ep->dma = &ep->dev->dma[ep->num - 1]; 504 505 spin_unlock_irqrestore(&ep->dev->lock, flags); 506 return 0; 507 } 508 509 /*-------------------------------------------------------------------------*/ 510 511 static struct usb_request 512 *net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 513 { 514 struct net2280_ep *ep; 515 struct net2280_request *req; 516 517 if (!_ep) { 518 pr_err("%s: Invalid ep\n", __func__); 519 return NULL; 520 } 521 ep = container_of(_ep, struct net2280_ep, ep); 522 523 req = kzalloc(sizeof(*req), gfp_flags); 524 if (!req) 525 return NULL; 526 527 INIT_LIST_HEAD(&req->queue); 528 529 /* this dma descriptor may be swapped with the previous dummy */ 530 if (ep->dma) { 531 struct net2280_dma *td; 532 533 td = pci_pool_alloc(ep->dev->requests, gfp_flags, 534 &req->td_dma); 535 if (!td) { 536 kfree(req); 537 return NULL; 538 } 539 td->dmacount = 0; /* not VALID */ 540 td->dmadesc = td->dmaaddr; 541 req->td = td; 542 } 543 return &req->req; 544 } 545 546 static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req) 547 { 548 struct net2280_ep *ep; 549 struct net2280_request *req; 550 551 ep = container_of(_ep, struct net2280_ep, ep); 552 if (!_ep || !_req) { 553 dev_err(&ep->dev->pdev->dev, "%s: Inavlid ep=%p or req=%p\n", 554 __func__, _ep, _req); 555 return; 556 } 557 558 req = container_of(_req, struct net2280_request, req); 559 WARN_ON(!list_empty(&req->queue)); 560 if (req->td) 561 pci_pool_free(ep->dev->requests, req->td, req->td_dma); 562 kfree(req); 563 } 564 565 /*-------------------------------------------------------------------------*/ 566 567 /* load a packet into the fifo we use for usb IN transfers. 568 * works for all endpoints. 569 * 570 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo 571 * at a time, but this code is simpler because it knows it only writes 572 * one packet. ep-a..ep-d should use dma instead. 573 */ 574 static void write_fifo(struct net2280_ep *ep, struct usb_request *req) 575 { 576 struct net2280_ep_regs __iomem *regs = ep->regs; 577 u8 *buf; 578 u32 tmp; 579 unsigned count, total; 580 581 /* INVARIANT: fifo is currently empty. (testable) */ 582 583 if (req) { 584 buf = req->buf + req->actual; 585 prefetch(buf); 586 total = req->length - req->actual; 587 } else { 588 total = 0; 589 buf = NULL; 590 } 591 592 /* write just one packet at a time */ 593 count = ep->ep.maxpacket; 594 if (count > total) /* min() cannot be used on a bitfield */ 595 count = total; 596 597 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", 598 ep->ep.name, count, 599 (count != ep->ep.maxpacket) ? " (short)" : "", 600 req); 601 while (count >= 4) { 602 /* NOTE be careful if you try to align these. fifo lines 603 * should normally be full (4 bytes) and successive partial 604 * lines are ok only in certain cases. 605 */ 606 tmp = get_unaligned((u32 *)buf); 607 cpu_to_le32s(&tmp); 608 writel(tmp, ®s->ep_data); 609 buf += 4; 610 count -= 4; 611 } 612 613 /* last fifo entry is "short" unless we wrote a full packet. 614 * also explicitly validate last word in (periodic) transfers 615 * when maxpacket is not a multiple of 4 bytes. 616 */ 617 if (count || total < ep->ep.maxpacket) { 618 tmp = count ? get_unaligned((u32 *)buf) : count; 619 cpu_to_le32s(&tmp); 620 set_fifo_bytecount(ep, count & 0x03); 621 writel(tmp, ®s->ep_data); 622 } 623 624 /* pci writes may still be posted */ 625 } 626 627 /* work around erratum 0106: PCI and USB race over the OUT fifo. 628 * caller guarantees chiprev 0100, out endpoint is NAKing, and 629 * there's no real data in the fifo. 630 * 631 * NOTE: also used in cases where that erratum doesn't apply: 632 * where the host wrote "too much" data to us. 633 */ 634 static void out_flush(struct net2280_ep *ep) 635 { 636 u32 __iomem *statp; 637 u32 tmp; 638 639 statp = &ep->regs->ep_stat; 640 641 tmp = readl(statp); 642 if (tmp & BIT(NAK_OUT_PACKETS)) { 643 ep_dbg(ep->dev, "%s %s %08x !NAK\n", 644 ep->ep.name, __func__, tmp); 645 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 646 } 647 648 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 649 BIT(DATA_PACKET_RECEIVED_INTERRUPT), 650 statp); 651 writel(BIT(FIFO_FLUSH), statp); 652 /* Make sure that stap is written */ 653 mb(); 654 tmp = readl(statp); 655 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) && 656 /* high speed did bulk NYET; fifo isn't filling */ 657 ep->dev->gadget.speed == USB_SPEED_FULL) { 658 unsigned usec; 659 660 usec = 50; /* 64 byte bulk/interrupt */ 661 handshake(statp, BIT(USB_OUT_PING_NAK_SENT), 662 BIT(USB_OUT_PING_NAK_SENT), usec); 663 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */ 664 } 665 } 666 667 /* unload packet(s) from the fifo we use for usb OUT transfers. 668 * returns true iff the request completed, because of short packet 669 * or the request buffer having filled with full packets. 670 * 671 * for ep-a..ep-d this will read multiple packets out when they 672 * have been accepted. 673 */ 674 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req) 675 { 676 struct net2280_ep_regs __iomem *regs = ep->regs; 677 u8 *buf = req->req.buf + req->req.actual; 678 unsigned count, tmp, is_short; 679 unsigned cleanup = 0, prevent = 0; 680 681 /* erratum 0106 ... packets coming in during fifo reads might 682 * be incompletely rejected. not all cases have workarounds. 683 */ 684 if (ep->dev->chiprev == 0x0100 && 685 ep->dev->gadget.speed == USB_SPEED_FULL) { 686 udelay(1); 687 tmp = readl(&ep->regs->ep_stat); 688 if ((tmp & BIT(NAK_OUT_PACKETS))) 689 cleanup = 1; 690 else if ((tmp & BIT(FIFO_FULL))) { 691 start_out_naking(ep); 692 prevent = 1; 693 } 694 /* else: hope we don't see the problem */ 695 } 696 697 /* never overflow the rx buffer. the fifo reads packets until 698 * it sees a short one; we might not be ready for them all. 699 */ 700 prefetchw(buf); 701 count = readl(®s->ep_avail); 702 if (unlikely(count == 0)) { 703 udelay(1); 704 tmp = readl(&ep->regs->ep_stat); 705 count = readl(®s->ep_avail); 706 /* handled that data already? */ 707 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0) 708 return 0; 709 } 710 711 tmp = req->req.length - req->req.actual; 712 if (count > tmp) { 713 /* as with DMA, data overflow gets flushed */ 714 if ((tmp % ep->ep.maxpacket) != 0) { 715 ep_err(ep->dev, 716 "%s out fifo %d bytes, expected %d\n", 717 ep->ep.name, count, tmp); 718 req->req.status = -EOVERFLOW; 719 cleanup = 1; 720 /* NAK_OUT_PACKETS will be set, so flushing is safe; 721 * the next read will start with the next packet 722 */ 723 } /* else it's a ZLP, no worries */ 724 count = tmp; 725 } 726 req->req.actual += count; 727 728 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0); 729 730 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", 731 ep->ep.name, count, is_short ? " (short)" : "", 732 cleanup ? " flush" : "", prevent ? " nak" : "", 733 req, req->req.actual, req->req.length); 734 735 while (count >= 4) { 736 tmp = readl(®s->ep_data); 737 cpu_to_le32s(&tmp); 738 put_unaligned(tmp, (u32 *)buf); 739 buf += 4; 740 count -= 4; 741 } 742 if (count) { 743 tmp = readl(®s->ep_data); 744 /* LE conversion is implicit here: */ 745 do { 746 *buf++ = (u8) tmp; 747 tmp >>= 8; 748 } while (--count); 749 } 750 if (cleanup) 751 out_flush(ep); 752 if (prevent) { 753 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 754 (void) readl(&ep->regs->ep_rsp); 755 } 756 757 return is_short || ((req->req.actual == req->req.length) && 758 !req->req.zero); 759 } 760 761 /* fill out dma descriptor to match a given request */ 762 static void fill_dma_desc(struct net2280_ep *ep, 763 struct net2280_request *req, int valid) 764 { 765 struct net2280_dma *td = req->td; 766 u32 dmacount = req->req.length; 767 768 /* don't let DMA continue after a short OUT packet, 769 * so overruns can't affect the next transfer. 770 * in case of overruns on max-size packets, we can't 771 * stop the fifo from filling but we can flush it. 772 */ 773 if (ep->is_in) 774 dmacount |= BIT(DMA_DIRECTION); 775 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) || 776 !(ep->dev->quirks & PLX_2280)) 777 dmacount |= BIT(END_OF_CHAIN); 778 779 req->valid = valid; 780 if (valid) 781 dmacount |= BIT(VALID_BIT); 782 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE); 783 784 /* td->dmadesc = previously set by caller */ 785 td->dmaaddr = cpu_to_le32 (req->req.dma); 786 787 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */ 788 wmb(); 789 td->dmacount = cpu_to_le32(dmacount); 790 } 791 792 static const u32 dmactl_default = 793 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 794 BIT(DMA_CLEAR_COUNT_ENABLE) | 795 /* erratum 0116 workaround part 1 (use POLLING) */ 796 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) | 797 BIT(DMA_VALID_BIT_POLLING_ENABLE) | 798 BIT(DMA_VALID_BIT_ENABLE) | 799 BIT(DMA_SCATTER_GATHER_ENABLE) | 800 /* erratum 0116 workaround part 2 (no AUTOSTART) */ 801 BIT(DMA_ENABLE); 802 803 static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma) 804 { 805 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50); 806 } 807 808 static inline void stop_dma(struct net2280_dma_regs __iomem *dma) 809 { 810 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl); 811 spin_stop_dma(dma); 812 } 813 814 static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) 815 { 816 struct net2280_dma_regs __iomem *dma = ep->dma; 817 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION); 818 819 if (!(ep->dev->quirks & PLX_2280)) 820 tmp |= BIT(END_OF_CHAIN); 821 822 writel(tmp, &dma->dmacount); 823 writel(readl(&dma->dmastat), &dma->dmastat); 824 825 writel(td_dma, &dma->dmadesc); 826 if (ep->dev->quirks & PLX_SUPERSPEED) 827 dmactl |= BIT(DMA_REQUEST_OUTSTANDING); 828 writel(dmactl, &dma->dmactl); 829 830 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */ 831 (void) readl(&ep->dev->pci->pcimstctl); 832 833 writel(BIT(DMA_START), &dma->dmastat); 834 835 if (!ep->is_in) 836 stop_out_naking(ep); 837 } 838 839 static void start_dma(struct net2280_ep *ep, struct net2280_request *req) 840 { 841 u32 tmp; 842 struct net2280_dma_regs __iomem *dma = ep->dma; 843 844 /* FIXME can't use DMA for ZLPs */ 845 846 /* on this path we "know" there's no dma active (yet) */ 847 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE)); 848 writel(0, &ep->dma->dmactl); 849 850 /* previous OUT packet might have been short */ 851 if (!ep->is_in && (readl(&ep->regs->ep_stat) & 852 BIT(NAK_OUT_PACKETS))) { 853 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT), 854 &ep->regs->ep_stat); 855 856 tmp = readl(&ep->regs->ep_avail); 857 if (tmp) { 858 writel(readl(&dma->dmastat), &dma->dmastat); 859 860 /* transfer all/some fifo data */ 861 writel(req->req.dma, &dma->dmaaddr); 862 tmp = min(tmp, req->req.length); 863 864 /* dma irq, faking scatterlist status */ 865 req->td->dmacount = cpu_to_le32(req->req.length - tmp); 866 writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp, 867 &dma->dmacount); 868 req->td->dmadesc = 0; 869 req->valid = 1; 870 871 writel(BIT(DMA_ENABLE), &dma->dmactl); 872 writel(BIT(DMA_START), &dma->dmastat); 873 return; 874 } 875 } 876 877 tmp = dmactl_default; 878 879 /* force packet boundaries between dma requests, but prevent the 880 * controller from automagically writing a last "short" packet 881 * (zero length) unless the driver explicitly said to do that. 882 */ 883 if (ep->is_in) { 884 if (likely((req->req.length % ep->ep.maxpacket) || 885 req->req.zero)){ 886 tmp |= BIT(DMA_FIFO_VALIDATE); 887 ep->in_fifo_validate = 1; 888 } else 889 ep->in_fifo_validate = 0; 890 } 891 892 /* init req->td, pointing to the current dummy */ 893 req->td->dmadesc = cpu_to_le32 (ep->td_dma); 894 fill_dma_desc(ep, req, 1); 895 896 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); 897 898 start_queue(ep, tmp, req->td_dma); 899 } 900 901 static inline void 902 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid) 903 { 904 struct net2280_dma *end; 905 dma_addr_t tmp; 906 907 /* swap new dummy for old, link; fill and maybe activate */ 908 end = ep->dummy; 909 ep->dummy = req->td; 910 req->td = end; 911 912 tmp = ep->td_dma; 913 ep->td_dma = req->td_dma; 914 req->td_dma = tmp; 915 916 end->dmadesc = cpu_to_le32 (ep->td_dma); 917 918 fill_dma_desc(ep, req, valid); 919 } 920 921 static void 922 done(struct net2280_ep *ep, struct net2280_request *req, int status) 923 { 924 struct net2280 *dev; 925 unsigned stopped = ep->stopped; 926 927 list_del_init(&req->queue); 928 929 if (req->req.status == -EINPROGRESS) 930 req->req.status = status; 931 else 932 status = req->req.status; 933 934 dev = ep->dev; 935 if (ep->dma) 936 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); 937 938 if (status && status != -ESHUTDOWN) 939 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n", 940 ep->ep.name, &req->req, status, 941 req->req.actual, req->req.length); 942 943 /* don't modify queue heads during completion callback */ 944 ep->stopped = 1; 945 spin_unlock(&dev->lock); 946 usb_gadget_giveback_request(&ep->ep, &req->req); 947 spin_lock(&dev->lock); 948 ep->stopped = stopped; 949 } 950 951 /*-------------------------------------------------------------------------*/ 952 953 static int 954 net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 955 { 956 struct net2280_request *req; 957 struct net2280_ep *ep; 958 struct net2280 *dev; 959 unsigned long flags; 960 int ret = 0; 961 962 /* we always require a cpu-view buffer, so that we can 963 * always use pio (as fallback or whatever). 964 */ 965 ep = container_of(_ep, struct net2280_ep, ep); 966 if (!_ep || (!ep->desc && ep->num != 0)) { 967 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 968 return -EINVAL; 969 } 970 req = container_of(_req, struct net2280_request, req); 971 if (!_req || !_req->complete || !_req->buf || 972 !list_empty(&req->queue)) { 973 ret = -EINVAL; 974 goto print_err; 975 } 976 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) { 977 ret = -EDOM; 978 goto print_err; 979 } 980 dev = ep->dev; 981 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { 982 ret = -ESHUTDOWN; 983 goto print_err; 984 } 985 986 /* FIXME implement PIO fallback for ZLPs with DMA */ 987 if (ep->dma && _req->length == 0) { 988 ret = -EOPNOTSUPP; 989 goto print_err; 990 } 991 992 /* set up dma mapping in case the caller didn't */ 993 if (ep->dma) { 994 ret = usb_gadget_map_request(&dev->gadget, _req, 995 ep->is_in); 996 if (ret) 997 goto print_err; 998 } 999 1000 ep_vdbg(dev, "%s queue req %p, len %d buf %p\n", 1001 _ep->name, _req, _req->length, _req->buf); 1002 1003 spin_lock_irqsave(&dev->lock, flags); 1004 1005 _req->status = -EINPROGRESS; 1006 _req->actual = 0; 1007 1008 /* kickstart this i/o queue? */ 1009 if (list_empty(&ep->queue) && !ep->stopped && 1010 !((dev->quirks & PLX_SUPERSPEED) && ep->dma && 1011 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) { 1012 1013 /* use DMA if the endpoint supports it, else pio */ 1014 if (ep->dma) 1015 start_dma(ep, req); 1016 else { 1017 /* maybe there's no control data, just status ack */ 1018 if (ep->num == 0 && _req->length == 0) { 1019 allow_status(ep); 1020 done(ep, req, 0); 1021 ep_vdbg(dev, "%s status ack\n", ep->ep.name); 1022 goto done; 1023 } 1024 1025 /* PIO ... stuff the fifo, or unblock it. */ 1026 if (ep->is_in) 1027 write_fifo(ep, _req); 1028 else if (list_empty(&ep->queue)) { 1029 u32 s; 1030 1031 /* OUT FIFO might have packet(s) buffered */ 1032 s = readl(&ep->regs->ep_stat); 1033 if ((s & BIT(FIFO_EMPTY)) == 0) { 1034 /* note: _req->short_not_ok is 1035 * ignored here since PIO _always_ 1036 * stops queue advance here, and 1037 * _req->status doesn't change for 1038 * short reads (only _req->actual) 1039 */ 1040 if (read_fifo(ep, req) && 1041 ep->num == 0) { 1042 done(ep, req, 0); 1043 allow_status(ep); 1044 /* don't queue it */ 1045 req = NULL; 1046 } else if (read_fifo(ep, req) && 1047 ep->num != 0) { 1048 done(ep, req, 0); 1049 req = NULL; 1050 } else 1051 s = readl(&ep->regs->ep_stat); 1052 } 1053 1054 /* don't NAK, let the fifo fill */ 1055 if (req && (s & BIT(NAK_OUT_PACKETS))) 1056 writel(BIT(CLEAR_NAK_OUT_PACKETS), 1057 &ep->regs->ep_rsp); 1058 } 1059 } 1060 1061 } else if (ep->dma) { 1062 int valid = 1; 1063 1064 if (ep->is_in) { 1065 int expect; 1066 1067 /* preventing magic zlps is per-engine state, not 1068 * per-transfer; irq logic must recover hiccups. 1069 */ 1070 expect = likely(req->req.zero || 1071 (req->req.length % ep->ep.maxpacket)); 1072 if (expect != ep->in_fifo_validate) 1073 valid = 0; 1074 } 1075 queue_dma(ep, req, valid); 1076 1077 } /* else the irq handler advances the queue. */ 1078 1079 ep->responded = 1; 1080 if (req) 1081 list_add_tail(&req->queue, &ep->queue); 1082 done: 1083 spin_unlock_irqrestore(&dev->lock, flags); 1084 1085 /* pci writes may still be posted */ 1086 return ret; 1087 1088 print_err: 1089 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret); 1090 return ret; 1091 } 1092 1093 static inline void 1094 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount, 1095 int status) 1096 { 1097 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); 1098 done(ep, req, status); 1099 } 1100 1101 static void scan_dma_completions(struct net2280_ep *ep) 1102 { 1103 /* only look at descriptors that were "naturally" retired, 1104 * so fifo and list head state won't matter 1105 */ 1106 while (!list_empty(&ep->queue)) { 1107 struct net2280_request *req; 1108 u32 tmp; 1109 1110 req = list_entry(ep->queue.next, 1111 struct net2280_request, queue); 1112 if (!req->valid) 1113 break; 1114 rmb(); 1115 tmp = le32_to_cpup(&req->td->dmacount); 1116 if ((tmp & BIT(VALID_BIT)) != 0) 1117 break; 1118 1119 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" 1120 * cases where DMA must be aborted; this code handles 1121 * all non-abort DMA completions. 1122 */ 1123 if (unlikely(req->td->dmadesc == 0)) { 1124 /* paranoia */ 1125 tmp = readl(&ep->dma->dmacount); 1126 if (tmp & DMA_BYTE_COUNT_MASK) 1127 break; 1128 /* single transfer mode */ 1129 dma_done(ep, req, tmp, 0); 1130 break; 1131 } else if (!ep->is_in && 1132 (req->req.length % ep->ep.maxpacket) && 1133 !(ep->dev->quirks & PLX_SUPERSPEED)) { 1134 1135 tmp = readl(&ep->regs->ep_stat); 1136 /* AVOID TROUBLE HERE by not issuing short reads from 1137 * your gadget driver. That helps avoids errata 0121, 1138 * 0122, and 0124; not all cases trigger the warning. 1139 */ 1140 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { 1141 ep_warn(ep->dev, "%s lost packet sync!\n", 1142 ep->ep.name); 1143 req->req.status = -EOVERFLOW; 1144 } else { 1145 tmp = readl(&ep->regs->ep_avail); 1146 if (tmp) { 1147 /* fifo gets flushed later */ 1148 ep->out_overflow = 1; 1149 ep_dbg(ep->dev, 1150 "%s dma, discard %d len %d\n", 1151 ep->ep.name, tmp, 1152 req->req.length); 1153 req->req.status = -EOVERFLOW; 1154 } 1155 } 1156 } 1157 dma_done(ep, req, tmp, 0); 1158 } 1159 } 1160 1161 static void restart_dma(struct net2280_ep *ep) 1162 { 1163 struct net2280_request *req; 1164 1165 if (ep->stopped) 1166 return; 1167 req = list_entry(ep->queue.next, struct net2280_request, queue); 1168 1169 start_dma(ep, req); 1170 } 1171 1172 static void abort_dma(struct net2280_ep *ep) 1173 { 1174 /* abort the current transfer */ 1175 if (likely(!list_empty(&ep->queue))) { 1176 /* FIXME work around errata 0121, 0122, 0124 */ 1177 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 1178 spin_stop_dma(ep->dma); 1179 } else 1180 stop_dma(ep->dma); 1181 scan_dma_completions(ep); 1182 } 1183 1184 /* dequeue ALL requests */ 1185 static void nuke(struct net2280_ep *ep) 1186 { 1187 struct net2280_request *req; 1188 1189 /* called with spinlock held */ 1190 ep->stopped = 1; 1191 if (ep->dma) 1192 abort_dma(ep); 1193 while (!list_empty(&ep->queue)) { 1194 req = list_entry(ep->queue.next, 1195 struct net2280_request, 1196 queue); 1197 done(ep, req, -ESHUTDOWN); 1198 } 1199 } 1200 1201 /* dequeue JUST ONE request */ 1202 static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1203 { 1204 struct net2280_ep *ep; 1205 struct net2280_request *req; 1206 unsigned long flags; 1207 u32 dmactl; 1208 int stopped; 1209 1210 ep = container_of(_ep, struct net2280_ep, ep); 1211 if (!_ep || (!ep->desc && ep->num != 0) || !_req) { 1212 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n", 1213 __func__, _ep, _req); 1214 return -EINVAL; 1215 } 1216 1217 spin_lock_irqsave(&ep->dev->lock, flags); 1218 stopped = ep->stopped; 1219 1220 /* quiesce dma while we patch the queue */ 1221 dmactl = 0; 1222 ep->stopped = 1; 1223 if (ep->dma) { 1224 dmactl = readl(&ep->dma->dmactl); 1225 /* WARNING erratum 0127 may kick in ... */ 1226 stop_dma(ep->dma); 1227 scan_dma_completions(ep); 1228 } 1229 1230 /* make sure it's still queued on this endpoint */ 1231 list_for_each_entry(req, &ep->queue, queue) { 1232 if (&req->req == _req) 1233 break; 1234 } 1235 if (&req->req != _req) { 1236 spin_unlock_irqrestore(&ep->dev->lock, flags); 1237 dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", 1238 __func__); 1239 return -EINVAL; 1240 } 1241 1242 /* queue head may be partially complete. */ 1243 if (ep->queue.next == &req->queue) { 1244 if (ep->dma) { 1245 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name); 1246 _req->status = -ECONNRESET; 1247 abort_dma(ep); 1248 if (likely(ep->queue.next == &req->queue)) { 1249 /* NOTE: misreports single-transfer mode*/ 1250 req->td->dmacount = 0; /* invalidate */ 1251 dma_done(ep, req, 1252 readl(&ep->dma->dmacount), 1253 -ECONNRESET); 1254 } 1255 } else { 1256 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name); 1257 done(ep, req, -ECONNRESET); 1258 } 1259 req = NULL; 1260 } 1261 1262 if (req) 1263 done(ep, req, -ECONNRESET); 1264 ep->stopped = stopped; 1265 1266 if (ep->dma) { 1267 /* turn off dma on inactive queues */ 1268 if (list_empty(&ep->queue)) 1269 stop_dma(ep->dma); 1270 else if (!ep->stopped) { 1271 /* resume current request, or start new one */ 1272 if (req) 1273 writel(dmactl, &ep->dma->dmactl); 1274 else 1275 start_dma(ep, list_entry(ep->queue.next, 1276 struct net2280_request, queue)); 1277 } 1278 } 1279 1280 spin_unlock_irqrestore(&ep->dev->lock, flags); 1281 return 0; 1282 } 1283 1284 /*-------------------------------------------------------------------------*/ 1285 1286 static int net2280_fifo_status(struct usb_ep *_ep); 1287 1288 static int 1289 net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) 1290 { 1291 struct net2280_ep *ep; 1292 unsigned long flags; 1293 int retval = 0; 1294 1295 ep = container_of(_ep, struct net2280_ep, ep); 1296 if (!_ep || (!ep->desc && ep->num != 0)) { 1297 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 1298 return -EINVAL; 1299 } 1300 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { 1301 retval = -ESHUTDOWN; 1302 goto print_err; 1303 } 1304 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03) 1305 == USB_ENDPOINT_XFER_ISOC) { 1306 retval = -EINVAL; 1307 goto print_err; 1308 } 1309 1310 spin_lock_irqsave(&ep->dev->lock, flags); 1311 if (!list_empty(&ep->queue)) { 1312 retval = -EAGAIN; 1313 goto print_unlock; 1314 } else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) { 1315 retval = -EAGAIN; 1316 goto print_unlock; 1317 } else { 1318 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name, 1319 value ? "set" : "clear", 1320 wedged ? "wedge" : "halt"); 1321 /* set/clear, then synch memory views with the device */ 1322 if (value) { 1323 if (ep->num == 0) 1324 ep->dev->protocol_stall = 1; 1325 else 1326 set_halt(ep); 1327 if (wedged) 1328 ep->wedged = 1; 1329 } else { 1330 clear_halt(ep); 1331 if (ep->dev->quirks & PLX_SUPERSPEED && 1332 !list_empty(&ep->queue) && ep->td_dma) 1333 restart_dma(ep); 1334 ep->wedged = 0; 1335 } 1336 (void) readl(&ep->regs->ep_rsp); 1337 } 1338 spin_unlock_irqrestore(&ep->dev->lock, flags); 1339 1340 return retval; 1341 1342 print_unlock: 1343 spin_unlock_irqrestore(&ep->dev->lock, flags); 1344 print_err: 1345 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval); 1346 return retval; 1347 } 1348 1349 static int net2280_set_halt(struct usb_ep *_ep, int value) 1350 { 1351 return net2280_set_halt_and_wedge(_ep, value, 0); 1352 } 1353 1354 static int net2280_set_wedge(struct usb_ep *_ep) 1355 { 1356 if (!_ep || _ep->name == ep0name) { 1357 pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep); 1358 return -EINVAL; 1359 } 1360 return net2280_set_halt_and_wedge(_ep, 1, 1); 1361 } 1362 1363 static int net2280_fifo_status(struct usb_ep *_ep) 1364 { 1365 struct net2280_ep *ep; 1366 u32 avail; 1367 1368 ep = container_of(_ep, struct net2280_ep, ep); 1369 if (!_ep || (!ep->desc && ep->num != 0)) { 1370 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 1371 return -ENODEV; 1372 } 1373 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { 1374 dev_err(&ep->dev->pdev->dev, 1375 "%s: Invalid driver=%p or speed=%d\n", 1376 __func__, ep->dev->driver, ep->dev->gadget.speed); 1377 return -ESHUTDOWN; 1378 } 1379 1380 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1); 1381 if (avail > ep->fifo_size) { 1382 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__); 1383 return -EOVERFLOW; 1384 } 1385 if (ep->is_in) 1386 avail = ep->fifo_size - avail; 1387 return avail; 1388 } 1389 1390 static void net2280_fifo_flush(struct usb_ep *_ep) 1391 { 1392 struct net2280_ep *ep; 1393 1394 ep = container_of(_ep, struct net2280_ep, ep); 1395 if (!_ep || (!ep->desc && ep->num != 0)) { 1396 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); 1397 return; 1398 } 1399 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { 1400 dev_err(&ep->dev->pdev->dev, 1401 "%s: Invalid driver=%p or speed=%d\n", 1402 __func__, ep->dev->driver, ep->dev->gadget.speed); 1403 return; 1404 } 1405 1406 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 1407 (void) readl(&ep->regs->ep_rsp); 1408 } 1409 1410 static const struct usb_ep_ops net2280_ep_ops = { 1411 .enable = net2280_enable, 1412 .disable = net2280_disable, 1413 1414 .alloc_request = net2280_alloc_request, 1415 .free_request = net2280_free_request, 1416 1417 .queue = net2280_queue, 1418 .dequeue = net2280_dequeue, 1419 1420 .set_halt = net2280_set_halt, 1421 .set_wedge = net2280_set_wedge, 1422 .fifo_status = net2280_fifo_status, 1423 .fifo_flush = net2280_fifo_flush, 1424 }; 1425 1426 /*-------------------------------------------------------------------------*/ 1427 1428 static int net2280_get_frame(struct usb_gadget *_gadget) 1429 { 1430 struct net2280 *dev; 1431 unsigned long flags; 1432 u16 retval; 1433 1434 if (!_gadget) 1435 return -ENODEV; 1436 dev = container_of(_gadget, struct net2280, gadget); 1437 spin_lock_irqsave(&dev->lock, flags); 1438 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff; 1439 spin_unlock_irqrestore(&dev->lock, flags); 1440 return retval; 1441 } 1442 1443 static int net2280_wakeup(struct usb_gadget *_gadget) 1444 { 1445 struct net2280 *dev; 1446 u32 tmp; 1447 unsigned long flags; 1448 1449 if (!_gadget) 1450 return 0; 1451 dev = container_of(_gadget, struct net2280, gadget); 1452 1453 spin_lock_irqsave(&dev->lock, flags); 1454 tmp = readl(&dev->usb->usbctl); 1455 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE)) 1456 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat); 1457 spin_unlock_irqrestore(&dev->lock, flags); 1458 1459 /* pci writes may still be posted */ 1460 return 0; 1461 } 1462 1463 static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value) 1464 { 1465 struct net2280 *dev; 1466 u32 tmp; 1467 unsigned long flags; 1468 1469 if (!_gadget) 1470 return 0; 1471 dev = container_of(_gadget, struct net2280, gadget); 1472 1473 spin_lock_irqsave(&dev->lock, flags); 1474 tmp = readl(&dev->usb->usbctl); 1475 if (value) { 1476 tmp |= BIT(SELF_POWERED_STATUS); 1477 _gadget->is_selfpowered = 1; 1478 } else { 1479 tmp &= ~BIT(SELF_POWERED_STATUS); 1480 _gadget->is_selfpowered = 0; 1481 } 1482 writel(tmp, &dev->usb->usbctl); 1483 spin_unlock_irqrestore(&dev->lock, flags); 1484 1485 return 0; 1486 } 1487 1488 static int net2280_pullup(struct usb_gadget *_gadget, int is_on) 1489 { 1490 struct net2280 *dev; 1491 u32 tmp; 1492 unsigned long flags; 1493 1494 if (!_gadget) 1495 return -ENODEV; 1496 dev = container_of(_gadget, struct net2280, gadget); 1497 1498 spin_lock_irqsave(&dev->lock, flags); 1499 tmp = readl(&dev->usb->usbctl); 1500 dev->softconnect = (is_on != 0); 1501 if (is_on) { 1502 ep0_start(dev); 1503 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1504 } else { 1505 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1506 stop_activity(dev, dev->driver); 1507 } 1508 1509 spin_unlock_irqrestore(&dev->lock, flags); 1510 1511 return 0; 1512 } 1513 1514 static int net2280_start(struct usb_gadget *_gadget, 1515 struct usb_gadget_driver *driver); 1516 static int net2280_stop(struct usb_gadget *_gadget); 1517 1518 static const struct usb_gadget_ops net2280_ops = { 1519 .get_frame = net2280_get_frame, 1520 .wakeup = net2280_wakeup, 1521 .set_selfpowered = net2280_set_selfpowered, 1522 .pullup = net2280_pullup, 1523 .udc_start = net2280_start, 1524 .udc_stop = net2280_stop, 1525 }; 1526 1527 /*-------------------------------------------------------------------------*/ 1528 1529 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1530 1531 /* FIXME move these into procfs, and use seq_file. 1532 * Sysfs _still_ doesn't behave for arbitrarily sized files, 1533 * and also doesn't help products using this with 2.4 kernels. 1534 */ 1535 1536 /* "function" sysfs attribute */ 1537 static ssize_t function_show(struct device *_dev, struct device_attribute *attr, 1538 char *buf) 1539 { 1540 struct net2280 *dev = dev_get_drvdata(_dev); 1541 1542 if (!dev->driver || !dev->driver->function || 1543 strlen(dev->driver->function) > PAGE_SIZE) 1544 return 0; 1545 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function); 1546 } 1547 static DEVICE_ATTR_RO(function); 1548 1549 static ssize_t registers_show(struct device *_dev, 1550 struct device_attribute *attr, char *buf) 1551 { 1552 struct net2280 *dev; 1553 char *next; 1554 unsigned size, t; 1555 unsigned long flags; 1556 int i; 1557 u32 t1, t2; 1558 const char *s; 1559 1560 dev = dev_get_drvdata(_dev); 1561 next = buf; 1562 size = PAGE_SIZE; 1563 spin_lock_irqsave(&dev->lock, flags); 1564 1565 if (dev->driver) 1566 s = dev->driver->driver.name; 1567 else 1568 s = "(none)"; 1569 1570 /* Main Control Registers */ 1571 t = scnprintf(next, size, "%s version " DRIVER_VERSION 1572 ", chiprev %04x\n\n" 1573 "devinit %03x fifoctl %08x gadget '%s'\n" 1574 "pci irqenb0 %02x irqenb1 %08x " 1575 "irqstat0 %04x irqstat1 %08x\n", 1576 driver_name, dev->chiprev, 1577 readl(&dev->regs->devinit), 1578 readl(&dev->regs->fifoctl), 1579 s, 1580 readl(&dev->regs->pciirqenb0), 1581 readl(&dev->regs->pciirqenb1), 1582 readl(&dev->regs->irqstat0), 1583 readl(&dev->regs->irqstat1)); 1584 size -= t; 1585 next += t; 1586 1587 /* USB Control Registers */ 1588 t1 = readl(&dev->usb->usbctl); 1589 t2 = readl(&dev->usb->usbstat); 1590 if (t1 & BIT(VBUS_PIN)) { 1591 if (t2 & BIT(HIGH_SPEED)) 1592 s = "high speed"; 1593 else if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1594 s = "powered"; 1595 else 1596 s = "full speed"; 1597 /* full speed bit (6) not working?? */ 1598 } else 1599 s = "not attached"; 1600 t = scnprintf(next, size, 1601 "stdrsp %08x usbctl %08x usbstat %08x " 1602 "addr 0x%02x (%s)\n", 1603 readl(&dev->usb->stdrsp), t1, t2, 1604 readl(&dev->usb->ouraddr), s); 1605 size -= t; 1606 next += t; 1607 1608 /* PCI Master Control Registers */ 1609 1610 /* DMA Control Registers */ 1611 1612 /* Configurable EP Control Registers */ 1613 for (i = 0; i < dev->n_ep; i++) { 1614 struct net2280_ep *ep; 1615 1616 ep = &dev->ep[i]; 1617 if (i && !ep->desc) 1618 continue; 1619 1620 t1 = readl(&ep->cfg->ep_cfg); 1621 t2 = readl(&ep->regs->ep_rsp) & 0xff; 1622 t = scnprintf(next, size, 1623 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s" 1624 "irqenb %02x\n", 1625 ep->ep.name, t1, t2, 1626 (t2 & BIT(CLEAR_NAK_OUT_PACKETS)) 1627 ? "NAK " : "", 1628 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE)) 1629 ? "hide " : "", 1630 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR)) 1631 ? "CRC " : "", 1632 (t2 & BIT(CLEAR_INTERRUPT_MODE)) 1633 ? "interrupt " : "", 1634 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)) 1635 ? "status " : "", 1636 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE)) 1637 ? "NAKmode " : "", 1638 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE)) 1639 ? "DATA1 " : "DATA0 ", 1640 (t2 & BIT(CLEAR_ENDPOINT_HALT)) 1641 ? "HALT " : "", 1642 readl(&ep->regs->ep_irqenb)); 1643 size -= t; 1644 next += t; 1645 1646 t = scnprintf(next, size, 1647 "\tstat %08x avail %04x " 1648 "(ep%d%s-%s)%s\n", 1649 readl(&ep->regs->ep_stat), 1650 readl(&ep->regs->ep_avail), 1651 t1 & 0x0f, DIR_STRING(t1), 1652 type_string(t1 >> 8), 1653 ep->stopped ? "*" : ""); 1654 size -= t; 1655 next += t; 1656 1657 if (!ep->dma) 1658 continue; 1659 1660 t = scnprintf(next, size, 1661 " dma\tctl %08x stat %08x count %08x\n" 1662 "\taddr %08x desc %08x\n", 1663 readl(&ep->dma->dmactl), 1664 readl(&ep->dma->dmastat), 1665 readl(&ep->dma->dmacount), 1666 readl(&ep->dma->dmaaddr), 1667 readl(&ep->dma->dmadesc)); 1668 size -= t; 1669 next += t; 1670 1671 } 1672 1673 /* Indexed Registers (none yet) */ 1674 1675 /* Statistics */ 1676 t = scnprintf(next, size, "\nirqs: "); 1677 size -= t; 1678 next += t; 1679 for (i = 0; i < dev->n_ep; i++) { 1680 struct net2280_ep *ep; 1681 1682 ep = &dev->ep[i]; 1683 if (i && !ep->irqs) 1684 continue; 1685 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs); 1686 size -= t; 1687 next += t; 1688 1689 } 1690 t = scnprintf(next, size, "\n"); 1691 size -= t; 1692 next += t; 1693 1694 spin_unlock_irqrestore(&dev->lock, flags); 1695 1696 return PAGE_SIZE - size; 1697 } 1698 static DEVICE_ATTR_RO(registers); 1699 1700 static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, 1701 char *buf) 1702 { 1703 struct net2280 *dev; 1704 char *next; 1705 unsigned size; 1706 unsigned long flags; 1707 int i; 1708 1709 dev = dev_get_drvdata(_dev); 1710 next = buf; 1711 size = PAGE_SIZE; 1712 spin_lock_irqsave(&dev->lock, flags); 1713 1714 for (i = 0; i < dev->n_ep; i++) { 1715 struct net2280_ep *ep = &dev->ep[i]; 1716 struct net2280_request *req; 1717 int t; 1718 1719 if (i != 0) { 1720 const struct usb_endpoint_descriptor *d; 1721 1722 d = ep->desc; 1723 if (!d) 1724 continue; 1725 t = d->bEndpointAddress; 1726 t = scnprintf(next, size, 1727 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n", 1728 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, 1729 (t & USB_DIR_IN) ? "in" : "out", 1730 type_string(d->bmAttributes), 1731 usb_endpoint_maxp(d) & 0x1fff, 1732 ep->dma ? "dma" : "pio", ep->fifo_size 1733 ); 1734 } else /* ep0 should only have one transfer queued */ 1735 t = scnprintf(next, size, "ep0 max 64 pio %s\n", 1736 ep->is_in ? "in" : "out"); 1737 if (t <= 0 || t > size) 1738 goto done; 1739 size -= t; 1740 next += t; 1741 1742 if (list_empty(&ep->queue)) { 1743 t = scnprintf(next, size, "\t(nothing queued)\n"); 1744 if (t <= 0 || t > size) 1745 goto done; 1746 size -= t; 1747 next += t; 1748 continue; 1749 } 1750 list_for_each_entry(req, &ep->queue, queue) { 1751 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) 1752 t = scnprintf(next, size, 1753 "\treq %p len %d/%d " 1754 "buf %p (dmacount %08x)\n", 1755 &req->req, req->req.actual, 1756 req->req.length, req->req.buf, 1757 readl(&ep->dma->dmacount)); 1758 else 1759 t = scnprintf(next, size, 1760 "\treq %p len %d/%d buf %p\n", 1761 &req->req, req->req.actual, 1762 req->req.length, req->req.buf); 1763 if (t <= 0 || t > size) 1764 goto done; 1765 size -= t; 1766 next += t; 1767 1768 if (ep->dma) { 1769 struct net2280_dma *td; 1770 1771 td = req->td; 1772 t = scnprintf(next, size, "\t td %08x " 1773 " count %08x buf %08x desc %08x\n", 1774 (u32) req->td_dma, 1775 le32_to_cpu(td->dmacount), 1776 le32_to_cpu(td->dmaaddr), 1777 le32_to_cpu(td->dmadesc)); 1778 if (t <= 0 || t > size) 1779 goto done; 1780 size -= t; 1781 next += t; 1782 } 1783 } 1784 } 1785 1786 done: 1787 spin_unlock_irqrestore(&dev->lock, flags); 1788 return PAGE_SIZE - size; 1789 } 1790 static DEVICE_ATTR_RO(queues); 1791 1792 1793 #else 1794 1795 #define device_create_file(a, b) (0) 1796 #define device_remove_file(a, b) do { } while (0) 1797 1798 #endif 1799 1800 /*-------------------------------------------------------------------------*/ 1801 1802 /* another driver-specific mode might be a request type doing dma 1803 * to/from another device fifo instead of to/from memory. 1804 */ 1805 1806 static void set_fifo_mode(struct net2280 *dev, int mode) 1807 { 1808 /* keeping high bits preserves BAR2 */ 1809 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl); 1810 1811 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */ 1812 INIT_LIST_HEAD(&dev->gadget.ep_list); 1813 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1814 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1815 switch (mode) { 1816 case 0: 1817 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1818 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list); 1819 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1820 break; 1821 case 1: 1822 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048; 1823 break; 1824 case 2: 1825 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1826 dev->ep[1].fifo_size = 2048; 1827 dev->ep[2].fifo_size = 1024; 1828 break; 1829 } 1830 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */ 1831 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list); 1832 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list); 1833 } 1834 1835 static void defect7374_disable_data_eps(struct net2280 *dev) 1836 { 1837 /* 1838 * For Defect 7374, disable data EPs (and more): 1839 * - This phase undoes the earlier phase of the Defect 7374 workaround, 1840 * returing ep regs back to normal. 1841 */ 1842 struct net2280_ep *ep; 1843 int i; 1844 unsigned char ep_sel; 1845 u32 tmp_reg; 1846 1847 for (i = 1; i < 5; i++) { 1848 ep = &dev->ep[i]; 1849 writel(0, &ep->cfg->ep_cfg); 1850 } 1851 1852 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */ 1853 for (i = 0; i < 6; i++) 1854 writel(0, &dev->dep[i].dep_cfg); 1855 1856 for (ep_sel = 0; ep_sel <= 21; ep_sel++) { 1857 /* Select an endpoint for subsequent operations: */ 1858 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); 1859 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl); 1860 1861 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) || 1862 ep_sel == 18 || ep_sel == 20) 1863 continue; 1864 1865 /* Change settings on some selected endpoints */ 1866 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4); 1867 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR); 1868 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4); 1869 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); 1870 tmp_reg |= BIT(EP_INITIALIZED); 1871 writel(tmp_reg, &dev->plregs->pl_ep_ctrl); 1872 } 1873 } 1874 1875 static void defect7374_enable_data_eps_zero(struct net2280 *dev) 1876 { 1877 u32 tmp = 0, tmp_reg; 1878 u32 scratch; 1879 int i; 1880 unsigned char ep_sel; 1881 1882 scratch = get_idx_reg(dev->regs, SCRATCH); 1883 1884 WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD)) 1885 == DEFECT7374_FSM_SS_CONTROL_READ); 1886 1887 scratch &= ~(0xf << DEFECT7374_FSM_FIELD); 1888 1889 ep_warn(dev, "Operate Defect 7374 workaround soft this time"); 1890 ep_warn(dev, "It will operate on cold-reboot and SS connect"); 1891 1892 /*GPEPs:*/ 1893 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) | 1894 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) | 1895 ((dev->enhanced_mode) ? 1896 BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) : 1897 BIT(ENDPOINT_ENABLE))); 1898 1899 for (i = 1; i < 5; i++) 1900 writel(tmp, &dev->ep[i].cfg->ep_cfg); 1901 1902 /* CSRIN, PCIIN, STATIN, RCIN*/ 1903 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE)); 1904 writel(tmp, &dev->dep[1].dep_cfg); 1905 writel(tmp, &dev->dep[3].dep_cfg); 1906 writel(tmp, &dev->dep[4].dep_cfg); 1907 writel(tmp, &dev->dep[5].dep_cfg); 1908 1909 /*Implemented for development and debug. 1910 * Can be refined/tuned later.*/ 1911 for (ep_sel = 0; ep_sel <= 21; ep_sel++) { 1912 /* Select an endpoint for subsequent operations: */ 1913 tmp_reg = readl(&dev->plregs->pl_ep_ctrl); 1914 writel(((tmp_reg & ~0x1f) | ep_sel), 1915 &dev->plregs->pl_ep_ctrl); 1916 1917 if (ep_sel == 1) { 1918 tmp = 1919 (readl(&dev->plregs->pl_ep_ctrl) | 1920 BIT(CLEAR_ACK_ERROR_CODE) | 0); 1921 writel(tmp, &dev->plregs->pl_ep_ctrl); 1922 continue; 1923 } 1924 1925 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) || 1926 ep_sel == 18 || ep_sel == 20) 1927 continue; 1928 1929 tmp = (readl(&dev->plregs->pl_ep_cfg_4) | 1930 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0); 1931 writel(tmp, &dev->plregs->pl_ep_cfg_4); 1932 1933 tmp = readl(&dev->plregs->pl_ep_ctrl) & 1934 ~BIT(EP_INITIALIZED); 1935 writel(tmp, &dev->plregs->pl_ep_ctrl); 1936 1937 } 1938 1939 /* Set FSM to focus on the first Control Read: 1940 * - Tip: Connection speed is known upon the first 1941 * setup request.*/ 1942 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ; 1943 set_idx_reg(dev->regs, SCRATCH, scratch); 1944 1945 } 1946 1947 /* keeping it simple: 1948 * - one bus driver, initted first; 1949 * - one function driver, initted second 1950 * 1951 * most of the work to support multiple net2280 controllers would 1952 * be to associate this gadget driver (yes?) with all of them, or 1953 * perhaps to bind specific drivers to specific devices. 1954 */ 1955 1956 static void usb_reset_228x(struct net2280 *dev) 1957 { 1958 u32 tmp; 1959 1960 dev->gadget.speed = USB_SPEED_UNKNOWN; 1961 (void) readl(&dev->usb->usbctl); 1962 1963 net2280_led_init(dev); 1964 1965 /* disable automatic responses, and irqs */ 1966 writel(0, &dev->usb->stdrsp); 1967 writel(0, &dev->regs->pciirqenb0); 1968 writel(0, &dev->regs->pciirqenb1); 1969 1970 /* clear old dma and irq state */ 1971 for (tmp = 0; tmp < 4; tmp++) { 1972 struct net2280_ep *ep = &dev->ep[tmp + 1]; 1973 if (ep->dma) 1974 abort_dma(ep); 1975 } 1976 1977 writel(~0, &dev->regs->irqstat0), 1978 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1), 1979 1980 /* reset, and enable pci */ 1981 tmp = readl(&dev->regs->devinit) | 1982 BIT(PCI_ENABLE) | 1983 BIT(FIFO_SOFT_RESET) | 1984 BIT(USB_SOFT_RESET) | 1985 BIT(M8051_RESET); 1986 writel(tmp, &dev->regs->devinit); 1987 1988 /* standard fifo and endpoint allocations */ 1989 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0); 1990 } 1991 1992 static void usb_reset_338x(struct net2280 *dev) 1993 { 1994 u32 tmp; 1995 1996 dev->gadget.speed = USB_SPEED_UNKNOWN; 1997 (void)readl(&dev->usb->usbctl); 1998 1999 net2280_led_init(dev); 2000 2001 if (dev->bug7734_patched) { 2002 /* disable automatic responses, and irqs */ 2003 writel(0, &dev->usb->stdrsp); 2004 writel(0, &dev->regs->pciirqenb0); 2005 writel(0, &dev->regs->pciirqenb1); 2006 } 2007 2008 /* clear old dma and irq state */ 2009 for (tmp = 0; tmp < 4; tmp++) { 2010 struct net2280_ep *ep = &dev->ep[tmp + 1]; 2011 struct net2280_dma_regs __iomem *dma; 2012 2013 if (ep->dma) { 2014 abort_dma(ep); 2015 } else { 2016 dma = &dev->dma[tmp]; 2017 writel(BIT(DMA_ABORT), &dma->dmastat); 2018 writel(0, &dma->dmactl); 2019 } 2020 } 2021 2022 writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1); 2023 2024 if (dev->bug7734_patched) { 2025 /* reset, and enable pci */ 2026 tmp = readl(&dev->regs->devinit) | 2027 BIT(PCI_ENABLE) | 2028 BIT(FIFO_SOFT_RESET) | 2029 BIT(USB_SOFT_RESET) | 2030 BIT(M8051_RESET); 2031 2032 writel(tmp, &dev->regs->devinit); 2033 } 2034 2035 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */ 2036 INIT_LIST_HEAD(&dev->gadget.ep_list); 2037 2038 for (tmp = 1; tmp < dev->n_ep; tmp++) 2039 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); 2040 2041 } 2042 2043 static void usb_reset(struct net2280 *dev) 2044 { 2045 if (dev->quirks & PLX_LEGACY) 2046 return usb_reset_228x(dev); 2047 return usb_reset_338x(dev); 2048 } 2049 2050 static void usb_reinit_228x(struct net2280 *dev) 2051 { 2052 u32 tmp; 2053 2054 /* basic endpoint init */ 2055 for (tmp = 0; tmp < 7; tmp++) { 2056 struct net2280_ep *ep = &dev->ep[tmp]; 2057 2058 ep->ep.name = ep_name[tmp]; 2059 ep->dev = dev; 2060 ep->num = tmp; 2061 2062 if (tmp > 0 && tmp <= 4) { 2063 ep->fifo_size = 1024; 2064 ep->dma = &dev->dma[tmp - 1]; 2065 } else 2066 ep->fifo_size = 64; 2067 ep->regs = &dev->epregs[tmp]; 2068 ep->cfg = &dev->epregs[tmp]; 2069 ep_reset_228x(dev->regs, ep); 2070 } 2071 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 2072 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64); 2073 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64); 2074 2075 dev->gadget.ep0 = &dev->ep[0].ep; 2076 dev->ep[0].stopped = 0; 2077 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2078 2079 /* we want to prevent lowlevel/insecure access from the USB host, 2080 * but erratum 0119 means this enable bit is ignored 2081 */ 2082 for (tmp = 0; tmp < 5; tmp++) 2083 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg); 2084 } 2085 2086 static void usb_reinit_338x(struct net2280 *dev) 2087 { 2088 int i; 2089 u32 tmp, val; 2090 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 }; 2091 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00, 2092 0x00, 0xC0, 0x00, 0xC0 }; 2093 2094 /* basic endpoint init */ 2095 for (i = 0; i < dev->n_ep; i++) { 2096 struct net2280_ep *ep = &dev->ep[i]; 2097 2098 ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i]; 2099 ep->dev = dev; 2100 ep->num = i; 2101 2102 if (i > 0 && i <= 4) 2103 ep->dma = &dev->dma[i - 1]; 2104 2105 if (dev->enhanced_mode) { 2106 ep->cfg = &dev->epregs[ne[i]]; 2107 /* 2108 * Set USB endpoint number, hardware allows same number 2109 * in both directions. 2110 */ 2111 if (i > 0 && i < 5) 2112 writel(ne[i], &ep->cfg->ep_cfg); 2113 ep->regs = (struct net2280_ep_regs __iomem *) 2114 (((void __iomem *)&dev->epregs[ne[i]]) + 2115 ep_reg_addr[i]); 2116 } else { 2117 ep->cfg = &dev->epregs[i]; 2118 ep->regs = &dev->epregs[i]; 2119 } 2120 2121 ep->fifo_size = (i != 0) ? 2048 : 512; 2122 2123 ep_reset_338x(dev->regs, ep); 2124 } 2125 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); 2126 2127 dev->gadget.ep0 = &dev->ep[0].ep; 2128 dev->ep[0].stopped = 0; 2129 2130 /* Link layer set up */ 2131 if (dev->bug7734_patched) { 2132 tmp = readl(&dev->usb_ext->usbctl2) & 2133 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE)); 2134 writel(tmp, &dev->usb_ext->usbctl2); 2135 } 2136 2137 /* Hardware Defect and Workaround */ 2138 val = readl(&dev->ll_lfps_regs->ll_lfps_5); 2139 val &= ~(0xf << TIMER_LFPS_6US); 2140 val |= 0x5 << TIMER_LFPS_6US; 2141 writel(val, &dev->ll_lfps_regs->ll_lfps_5); 2142 2143 val = readl(&dev->ll_lfps_regs->ll_lfps_6); 2144 val &= ~(0xffff << TIMER_LFPS_80US); 2145 val |= 0x0100 << TIMER_LFPS_80US; 2146 writel(val, &dev->ll_lfps_regs->ll_lfps_6); 2147 2148 /* 2149 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB 2150 * Hot Reset Exit Handshake may Fail in Specific Case using 2151 * Default Register Settings. Workaround for Enumeration test. 2152 */ 2153 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); 2154 val &= ~(0x1f << HOT_TX_NORESET_TS2); 2155 val |= 0x10 << HOT_TX_NORESET_TS2; 2156 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); 2157 2158 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); 2159 val &= ~(0x1f << HOT_RX_RESET_TS2); 2160 val |= 0x3 << HOT_RX_RESET_TS2; 2161 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); 2162 2163 /* 2164 * Set Recovery Idle to Recover bit: 2165 * - On SS connections, setting Recovery Idle to Recover Fmw improves 2166 * link robustness with various hosts and hubs. 2167 * - It is safe to set for all connection speeds; all chip revisions. 2168 * - R-M-W to leave other bits undisturbed. 2169 * - Reference PLX TT-7372 2170 */ 2171 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); 2172 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW); 2173 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); 2174 2175 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 2176 2177 /* disable dedicated endpoints */ 2178 writel(0x0D, &dev->dep[0].dep_cfg); 2179 writel(0x0D, &dev->dep[1].dep_cfg); 2180 writel(0x0E, &dev->dep[2].dep_cfg); 2181 writel(0x0E, &dev->dep[3].dep_cfg); 2182 writel(0x0F, &dev->dep[4].dep_cfg); 2183 writel(0x0C, &dev->dep[5].dep_cfg); 2184 } 2185 2186 static void usb_reinit(struct net2280 *dev) 2187 { 2188 if (dev->quirks & PLX_LEGACY) 2189 return usb_reinit_228x(dev); 2190 return usb_reinit_338x(dev); 2191 } 2192 2193 static void ep0_start_228x(struct net2280 *dev) 2194 { 2195 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 2196 BIT(CLEAR_NAK_OUT_PACKETS) | 2197 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), 2198 &dev->epregs[0].ep_rsp); 2199 2200 /* 2201 * hardware optionally handles a bunch of standard requests 2202 * that the API hides from drivers anyway. have it do so. 2203 * endpoint status/features are handled in software, to 2204 * help pass tests for some dubious behavior. 2205 */ 2206 writel(BIT(SET_TEST_MODE) | 2207 BIT(SET_ADDRESS) | 2208 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) | 2209 BIT(GET_DEVICE_STATUS) | 2210 BIT(GET_INTERFACE_STATUS), 2211 &dev->usb->stdrsp); 2212 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2213 BIT(SELF_POWERED_USB_DEVICE) | 2214 BIT(REMOTE_WAKEUP_SUPPORT) | 2215 (dev->softconnect << USB_DETECT_ENABLE) | 2216 BIT(SELF_POWERED_STATUS), 2217 &dev->usb->usbctl); 2218 2219 /* enable irqs so we can see ep0 and general operation */ 2220 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2221 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2222 &dev->regs->pciirqenb0); 2223 writel(BIT(PCI_INTERRUPT_ENABLE) | 2224 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2225 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) | 2226 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) | 2227 BIT(VBUS_INTERRUPT_ENABLE) | 2228 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2229 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE), 2230 &dev->regs->pciirqenb1); 2231 2232 /* don't leave any writes posted */ 2233 (void) readl(&dev->usb->usbctl); 2234 } 2235 2236 static void ep0_start_338x(struct net2280 *dev) 2237 { 2238 2239 if (dev->bug7734_patched) 2240 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) | 2241 BIT(SET_EP_HIDE_STATUS_PHASE), 2242 &dev->epregs[0].ep_rsp); 2243 2244 /* 2245 * hardware optionally handles a bunch of standard requests 2246 * that the API hides from drivers anyway. have it do so. 2247 * endpoint status/features are handled in software, to 2248 * help pass tests for some dubious behavior. 2249 */ 2250 writel(BIT(SET_ISOCHRONOUS_DELAY) | 2251 BIT(SET_SEL) | 2252 BIT(SET_TEST_MODE) | 2253 BIT(SET_ADDRESS) | 2254 BIT(GET_INTERFACE_STATUS) | 2255 BIT(GET_DEVICE_STATUS), 2256 &dev->usb->stdrsp); 2257 dev->wakeup_enable = 1; 2258 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) | 2259 (dev->softconnect << USB_DETECT_ENABLE) | 2260 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2261 &dev->usb->usbctl); 2262 2263 /* enable irqs so we can see ep0 and general operation */ 2264 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) | 2265 BIT(ENDPOINT_0_INTERRUPT_ENABLE), 2266 &dev->regs->pciirqenb0); 2267 writel(BIT(PCI_INTERRUPT_ENABLE) | 2268 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) | 2269 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) | 2270 BIT(VBUS_INTERRUPT_ENABLE), 2271 &dev->regs->pciirqenb1); 2272 2273 /* don't leave any writes posted */ 2274 (void)readl(&dev->usb->usbctl); 2275 } 2276 2277 static void ep0_start(struct net2280 *dev) 2278 { 2279 if (dev->quirks & PLX_LEGACY) 2280 return ep0_start_228x(dev); 2281 return ep0_start_338x(dev); 2282 } 2283 2284 /* when a driver is successfully registered, it will receive 2285 * control requests including set_configuration(), which enables 2286 * non-control requests. then usb traffic follows until a 2287 * disconnect is reported. then a host may connect again, or 2288 * the driver might get unbound. 2289 */ 2290 static int net2280_start(struct usb_gadget *_gadget, 2291 struct usb_gadget_driver *driver) 2292 { 2293 struct net2280 *dev; 2294 int retval; 2295 unsigned i; 2296 2297 /* insist on high speed support from the driver, since 2298 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) 2299 * "must not be used in normal operation" 2300 */ 2301 if (!driver || driver->max_speed < USB_SPEED_HIGH || 2302 !driver->setup) 2303 return -EINVAL; 2304 2305 dev = container_of(_gadget, struct net2280, gadget); 2306 2307 for (i = 0; i < dev->n_ep; i++) 2308 dev->ep[i].irqs = 0; 2309 2310 /* hook up the driver ... */ 2311 driver->driver.bus = NULL; 2312 dev->driver = driver; 2313 2314 retval = device_create_file(&dev->pdev->dev, &dev_attr_function); 2315 if (retval) 2316 goto err_unbind; 2317 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues); 2318 if (retval) 2319 goto err_func; 2320 2321 /* enable host detection and ep0; and we're ready 2322 * for set_configuration as well as eventual disconnect. 2323 */ 2324 net2280_led_active(dev, 1); 2325 2326 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched) 2327 defect7374_enable_data_eps_zero(dev); 2328 2329 ep0_start(dev); 2330 2331 /* pci writes may still be posted */ 2332 return 0; 2333 2334 err_func: 2335 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2336 err_unbind: 2337 dev->driver = NULL; 2338 return retval; 2339 } 2340 2341 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) 2342 { 2343 int i; 2344 2345 /* don't disconnect if it's not connected */ 2346 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 2347 driver = NULL; 2348 2349 /* stop hardware; prevent new request submissions; 2350 * and kill any outstanding requests. 2351 */ 2352 usb_reset(dev); 2353 for (i = 0; i < dev->n_ep; i++) 2354 nuke(&dev->ep[i]); 2355 2356 /* report disconnect; the driver is already quiesced */ 2357 if (driver) { 2358 spin_unlock(&dev->lock); 2359 driver->disconnect(&dev->gadget); 2360 spin_lock(&dev->lock); 2361 } 2362 2363 usb_reinit(dev); 2364 } 2365 2366 static int net2280_stop(struct usb_gadget *_gadget) 2367 { 2368 struct net2280 *dev; 2369 unsigned long flags; 2370 2371 dev = container_of(_gadget, struct net2280, gadget); 2372 2373 spin_lock_irqsave(&dev->lock, flags); 2374 stop_activity(dev, NULL); 2375 spin_unlock_irqrestore(&dev->lock, flags); 2376 2377 net2280_led_active(dev, 0); 2378 2379 device_remove_file(&dev->pdev->dev, &dev_attr_function); 2380 device_remove_file(&dev->pdev->dev, &dev_attr_queues); 2381 2382 dev->driver = NULL; 2383 2384 return 0; 2385 } 2386 2387 /*-------------------------------------------------------------------------*/ 2388 2389 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq. 2390 * also works for dma-capable endpoints, in pio mode or just 2391 * to manually advance the queue after short OUT transfers. 2392 */ 2393 static void handle_ep_small(struct net2280_ep *ep) 2394 { 2395 struct net2280_request *req; 2396 u32 t; 2397 /* 0 error, 1 mid-data, 2 done */ 2398 int mode = 1; 2399 2400 if (!list_empty(&ep->queue)) 2401 req = list_entry(ep->queue.next, 2402 struct net2280_request, queue); 2403 else 2404 req = NULL; 2405 2406 /* ack all, and handle what we care about */ 2407 t = readl(&ep->regs->ep_stat); 2408 ep->irqs++; 2409 2410 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n", 2411 ep->ep.name, t, req ? &req->req : NULL); 2412 2413 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) 2414 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat); 2415 else 2416 /* Added for 2282 */ 2417 writel(t, &ep->regs->ep_stat); 2418 2419 /* for ep0, monitor token irqs to catch data stage length errors 2420 * and to synchronize on status. 2421 * 2422 * also, to defer reporting of protocol stalls ... here's where 2423 * data or status first appears, handling stalls here should never 2424 * cause trouble on the host side.. 2425 * 2426 * control requests could be slightly faster without token synch for 2427 * status, but status can jam up that way. 2428 */ 2429 if (unlikely(ep->num == 0)) { 2430 if (ep->is_in) { 2431 /* status; stop NAKing */ 2432 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) { 2433 if (ep->dev->protocol_stall) { 2434 ep->stopped = 1; 2435 set_halt(ep); 2436 } 2437 if (!req) 2438 allow_status(ep); 2439 mode = 2; 2440 /* reply to extra IN data tokens with a zlp */ 2441 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2442 if (ep->dev->protocol_stall) { 2443 ep->stopped = 1; 2444 set_halt(ep); 2445 mode = 2; 2446 } else if (ep->responded && 2447 !req && !ep->stopped) 2448 write_fifo(ep, NULL); 2449 } 2450 } else { 2451 /* status; stop NAKing */ 2452 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2453 if (ep->dev->protocol_stall) { 2454 ep->stopped = 1; 2455 set_halt(ep); 2456 } 2457 mode = 2; 2458 /* an extra OUT token is an error */ 2459 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) && 2460 req && 2461 req->req.actual == req->req.length) || 2462 (ep->responded && !req)) { 2463 ep->dev->protocol_stall = 1; 2464 set_halt(ep); 2465 ep->stopped = 1; 2466 if (req) 2467 done(ep, req, -EOVERFLOW); 2468 req = NULL; 2469 } 2470 } 2471 } 2472 2473 if (unlikely(!req)) 2474 return; 2475 2476 /* manual DMA queue advance after short OUT */ 2477 if (likely(ep->dma)) { 2478 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 2479 u32 count; 2480 int stopped = ep->stopped; 2481 2482 /* TRANSFERRED works around OUT_DONE erratum 0112. 2483 * we expect (N <= maxpacket) bytes; host wrote M. 2484 * iff (M < N) we won't ever see a DMA interrupt. 2485 */ 2486 ep->stopped = 1; 2487 for (count = 0; ; t = readl(&ep->regs->ep_stat)) { 2488 2489 /* any preceding dma transfers must finish. 2490 * dma handles (M >= N), may empty the queue 2491 */ 2492 scan_dma_completions(ep); 2493 if (unlikely(list_empty(&ep->queue) || 2494 ep->out_overflow)) { 2495 req = NULL; 2496 break; 2497 } 2498 req = list_entry(ep->queue.next, 2499 struct net2280_request, queue); 2500 2501 /* here either (M < N), a "real" short rx; 2502 * or (M == N) and the queue didn't empty 2503 */ 2504 if (likely(t & BIT(FIFO_EMPTY))) { 2505 count = readl(&ep->dma->dmacount); 2506 count &= DMA_BYTE_COUNT_MASK; 2507 if (readl(&ep->dma->dmadesc) 2508 != req->td_dma) 2509 req = NULL; 2510 break; 2511 } 2512 udelay(1); 2513 } 2514 2515 /* stop DMA, leave ep NAKing */ 2516 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 2517 spin_stop_dma(ep->dma); 2518 2519 if (likely(req)) { 2520 req->td->dmacount = 0; 2521 t = readl(&ep->regs->ep_avail); 2522 dma_done(ep, req, count, 2523 (ep->out_overflow || t) 2524 ? -EOVERFLOW : 0); 2525 } 2526 2527 /* also flush to prevent erratum 0106 trouble */ 2528 if (unlikely(ep->out_overflow || 2529 (ep->dev->chiprev == 0x0100 && 2530 ep->dev->gadget.speed 2531 == USB_SPEED_FULL))) { 2532 out_flush(ep); 2533 ep->out_overflow = 0; 2534 } 2535 2536 /* (re)start dma if needed, stop NAKing */ 2537 ep->stopped = stopped; 2538 if (!list_empty(&ep->queue)) 2539 restart_dma(ep); 2540 } else 2541 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n", 2542 ep->ep.name, t); 2543 return; 2544 2545 /* data packet(s) received (in the fifo, OUT) */ 2546 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) { 2547 if (read_fifo(ep, req) && ep->num != 0) 2548 mode = 2; 2549 2550 /* data packet(s) transmitted (IN) */ 2551 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) { 2552 unsigned len; 2553 2554 len = req->req.length - req->req.actual; 2555 if (len > ep->ep.maxpacket) 2556 len = ep->ep.maxpacket; 2557 req->req.actual += len; 2558 2559 /* if we wrote it all, we're usually done */ 2560 /* send zlps until the status stage */ 2561 if ((req->req.actual == req->req.length) && 2562 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) 2563 mode = 2; 2564 2565 /* there was nothing to do ... */ 2566 } else if (mode == 1) 2567 return; 2568 2569 /* done */ 2570 if (mode == 2) { 2571 /* stream endpoints often resubmit/unlink in completion */ 2572 done(ep, req, 0); 2573 2574 /* maybe advance queue to next request */ 2575 if (ep->num == 0) { 2576 /* NOTE: net2280 could let gadget driver start the 2577 * status stage later. since not all controllers let 2578 * them control that, the api doesn't (yet) allow it. 2579 */ 2580 if (!ep->stopped) 2581 allow_status(ep); 2582 req = NULL; 2583 } else { 2584 if (!list_empty(&ep->queue) && !ep->stopped) 2585 req = list_entry(ep->queue.next, 2586 struct net2280_request, queue); 2587 else 2588 req = NULL; 2589 if (req && !ep->is_in) 2590 stop_out_naking(ep); 2591 } 2592 } 2593 2594 /* is there a buffer for the next packet? 2595 * for best streaming performance, make sure there is one. 2596 */ 2597 if (req && !ep->stopped) { 2598 2599 /* load IN fifo with next packet (may be zlp) */ 2600 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) 2601 write_fifo(ep, &req->req); 2602 } 2603 } 2604 2605 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex) 2606 { 2607 struct net2280_ep *ep; 2608 2609 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 2610 return &dev->ep[0]; 2611 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { 2612 u8 bEndpointAddress; 2613 2614 if (!ep->desc) 2615 continue; 2616 bEndpointAddress = ep->desc->bEndpointAddress; 2617 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 2618 continue; 2619 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) 2620 return ep; 2621 } 2622 return NULL; 2623 } 2624 2625 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r) 2626 { 2627 u32 scratch, fsmvalue; 2628 u32 ack_wait_timeout, state; 2629 2630 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */ 2631 scratch = get_idx_reg(dev->regs, SCRATCH); 2632 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); 2633 scratch &= ~(0xf << DEFECT7374_FSM_FIELD); 2634 2635 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) && 2636 (r.bRequestType & USB_DIR_IN))) 2637 return; 2638 2639 /* This is the first Control Read for this connection: */ 2640 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) { 2641 /* 2642 * Connection is NOT SS: 2643 * - Connection must be FS or HS. 2644 * - This FSM state should allow workaround software to 2645 * run after the next USB connection. 2646 */ 2647 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ; 2648 dev->bug7734_patched = 1; 2649 goto restore_data_eps; 2650 } 2651 2652 /* Connection is SS: */ 2653 for (ack_wait_timeout = 0; 2654 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS; 2655 ack_wait_timeout++) { 2656 2657 state = readl(&dev->plregs->pl_ep_status_1) 2658 & (0xff << STATE); 2659 if ((state >= (ACK_GOOD_NORMAL << STATE)) && 2660 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) { 2661 scratch |= DEFECT7374_FSM_SS_CONTROL_READ; 2662 dev->bug7734_patched = 1; 2663 break; 2664 } 2665 2666 /* 2667 * We have not yet received host's Data Phase ACK 2668 * - Wait and try again. 2669 */ 2670 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME); 2671 2672 continue; 2673 } 2674 2675 2676 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) { 2677 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed " 2678 "to detect SS host's data phase ACK."); 2679 ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16" 2680 "got 0x%2.2x.\n", state >> STATE); 2681 } else { 2682 ep_warn(dev, "INFO: Defect 7374 workaround waited about\n" 2683 "%duSec for Control Read Data Phase ACK\n", 2684 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout); 2685 } 2686 2687 restore_data_eps: 2688 /* 2689 * Restore data EPs to their pre-workaround settings (disabled, 2690 * initialized, and other details). 2691 */ 2692 defect7374_disable_data_eps(dev); 2693 2694 set_idx_reg(dev->regs, SCRATCH, scratch); 2695 2696 return; 2697 } 2698 2699 static void ep_clear_seqnum(struct net2280_ep *ep) 2700 { 2701 struct net2280 *dev = ep->dev; 2702 u32 val; 2703 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 }; 2704 2705 val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f; 2706 val |= ep_pl[ep->num]; 2707 writel(val, &dev->plregs->pl_ep_ctrl); 2708 val |= BIT(SEQUENCE_NUMBER_RESET); 2709 writel(val, &dev->plregs->pl_ep_ctrl); 2710 2711 return; 2712 } 2713 2714 static void handle_stat0_irqs_superspeed(struct net2280 *dev, 2715 struct net2280_ep *ep, struct usb_ctrlrequest r) 2716 { 2717 int tmp = 0; 2718 2719 #define w_value le16_to_cpu(r.wValue) 2720 #define w_index le16_to_cpu(r.wIndex) 2721 #define w_length le16_to_cpu(r.wLength) 2722 2723 switch (r.bRequest) { 2724 struct net2280_ep *e; 2725 u16 status; 2726 2727 case USB_REQ_SET_CONFIGURATION: 2728 dev->addressed_state = !w_value; 2729 goto usb3_delegate; 2730 2731 case USB_REQ_GET_STATUS: 2732 switch (r.bRequestType) { 2733 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2734 status = dev->wakeup_enable ? 0x02 : 0x00; 2735 if (dev->gadget.is_selfpowered) 2736 status |= BIT(0); 2737 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | 2738 dev->ltm_enable << 4); 2739 writel(0, &dev->epregs[0].ep_irqenb); 2740 set_fifo_bytecount(ep, sizeof(status)); 2741 writel((__force u32) status, &dev->epregs[0].ep_data); 2742 allow_status_338x(ep); 2743 break; 2744 2745 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2746 e = get_ep_by_addr(dev, w_index); 2747 if (!e) 2748 goto do_stall3; 2749 status = readl(&e->regs->ep_rsp) & 2750 BIT(CLEAR_ENDPOINT_HALT); 2751 writel(0, &dev->epregs[0].ep_irqenb); 2752 set_fifo_bytecount(ep, sizeof(status)); 2753 writel((__force u32) status, &dev->epregs[0].ep_data); 2754 allow_status_338x(ep); 2755 break; 2756 2757 default: 2758 goto usb3_delegate; 2759 } 2760 break; 2761 2762 case USB_REQ_CLEAR_FEATURE: 2763 switch (r.bRequestType) { 2764 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2765 if (!dev->addressed_state) { 2766 switch (w_value) { 2767 case USB_DEVICE_U1_ENABLE: 2768 dev->u1_enable = 0; 2769 writel(readl(&dev->usb_ext->usbctl2) & 2770 ~BIT(U1_ENABLE), 2771 &dev->usb_ext->usbctl2); 2772 allow_status_338x(ep); 2773 goto next_endpoints3; 2774 2775 case USB_DEVICE_U2_ENABLE: 2776 dev->u2_enable = 0; 2777 writel(readl(&dev->usb_ext->usbctl2) & 2778 ~BIT(U2_ENABLE), 2779 &dev->usb_ext->usbctl2); 2780 allow_status_338x(ep); 2781 goto next_endpoints3; 2782 2783 case USB_DEVICE_LTM_ENABLE: 2784 dev->ltm_enable = 0; 2785 writel(readl(&dev->usb_ext->usbctl2) & 2786 ~BIT(LTM_ENABLE), 2787 &dev->usb_ext->usbctl2); 2788 allow_status_338x(ep); 2789 goto next_endpoints3; 2790 2791 default: 2792 break; 2793 } 2794 } 2795 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2796 dev->wakeup_enable = 0; 2797 writel(readl(&dev->usb->usbctl) & 2798 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2799 &dev->usb->usbctl); 2800 allow_status_338x(ep); 2801 break; 2802 } 2803 goto usb3_delegate; 2804 2805 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2806 e = get_ep_by_addr(dev, w_index); 2807 if (!e) 2808 goto do_stall3; 2809 if (w_value != USB_ENDPOINT_HALT) 2810 goto do_stall3; 2811 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 2812 /* 2813 * Workaround for SS SeqNum not cleared via 2814 * Endpoint Halt (Clear) bit. select endpoint 2815 */ 2816 ep_clear_seqnum(e); 2817 clear_halt(e); 2818 if (!list_empty(&e->queue) && e->td_dma) 2819 restart_dma(e); 2820 allow_status(ep); 2821 ep->stopped = 1; 2822 break; 2823 2824 default: 2825 goto usb3_delegate; 2826 } 2827 break; 2828 case USB_REQ_SET_FEATURE: 2829 switch (r.bRequestType) { 2830 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): 2831 if (!dev->addressed_state) { 2832 switch (w_value) { 2833 case USB_DEVICE_U1_ENABLE: 2834 dev->u1_enable = 1; 2835 writel(readl(&dev->usb_ext->usbctl2) | 2836 BIT(U1_ENABLE), 2837 &dev->usb_ext->usbctl2); 2838 allow_status_338x(ep); 2839 goto next_endpoints3; 2840 2841 case USB_DEVICE_U2_ENABLE: 2842 dev->u2_enable = 1; 2843 writel(readl(&dev->usb_ext->usbctl2) | 2844 BIT(U2_ENABLE), 2845 &dev->usb_ext->usbctl2); 2846 allow_status_338x(ep); 2847 goto next_endpoints3; 2848 2849 case USB_DEVICE_LTM_ENABLE: 2850 dev->ltm_enable = 1; 2851 writel(readl(&dev->usb_ext->usbctl2) | 2852 BIT(LTM_ENABLE), 2853 &dev->usb_ext->usbctl2); 2854 allow_status_338x(ep); 2855 goto next_endpoints3; 2856 default: 2857 break; 2858 } 2859 } 2860 2861 if (w_value == USB_DEVICE_REMOTE_WAKEUP) { 2862 dev->wakeup_enable = 1; 2863 writel(readl(&dev->usb->usbctl) | 2864 BIT(DEVICE_REMOTE_WAKEUP_ENABLE), 2865 &dev->usb->usbctl); 2866 allow_status_338x(ep); 2867 break; 2868 } 2869 goto usb3_delegate; 2870 2871 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): 2872 e = get_ep_by_addr(dev, w_index); 2873 if (!e || (w_value != USB_ENDPOINT_HALT)) 2874 goto do_stall3; 2875 ep->stopped = 1; 2876 if (ep->num == 0) 2877 ep->dev->protocol_stall = 1; 2878 else { 2879 if (ep->dma) 2880 abort_dma(ep); 2881 set_halt(ep); 2882 } 2883 allow_status_338x(ep); 2884 break; 2885 2886 default: 2887 goto usb3_delegate; 2888 } 2889 2890 break; 2891 default: 2892 2893 usb3_delegate: 2894 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n", 2895 r.bRequestType, r.bRequest, 2896 w_value, w_index, w_length, 2897 readl(&ep->cfg->ep_cfg)); 2898 2899 ep->responded = 0; 2900 spin_unlock(&dev->lock); 2901 tmp = dev->driver->setup(&dev->gadget, &r); 2902 spin_lock(&dev->lock); 2903 } 2904 do_stall3: 2905 if (tmp < 0) { 2906 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 2907 r.bRequestType, r.bRequest, tmp); 2908 dev->protocol_stall = 1; 2909 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */ 2910 set_halt(ep); 2911 } 2912 2913 next_endpoints3: 2914 2915 #undef w_value 2916 #undef w_index 2917 #undef w_length 2918 2919 return; 2920 } 2921 2922 static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0) 2923 { 2924 u32 index; 2925 u32 bit; 2926 2927 for (index = 0; index < ARRAY_SIZE(ep_bit); index++) { 2928 bit = BIT(ep_bit[index]); 2929 2930 if (!stat0) 2931 break; 2932 2933 if (!(stat0 & bit)) 2934 continue; 2935 2936 stat0 &= ~bit; 2937 2938 handle_ep_small(&dev->ep[index]); 2939 } 2940 } 2941 2942 static void handle_stat0_irqs(struct net2280 *dev, u32 stat) 2943 { 2944 struct net2280_ep *ep; 2945 u32 num, scratch; 2946 2947 /* most of these don't need individual acks */ 2948 stat &= ~BIT(INTA_ASSERTED); 2949 if (!stat) 2950 return; 2951 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */ 2952 2953 /* starting a control request? */ 2954 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) { 2955 union { 2956 u32 raw[2]; 2957 struct usb_ctrlrequest r; 2958 } u; 2959 int tmp; 2960 struct net2280_request *req; 2961 2962 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { 2963 u32 val = readl(&dev->usb->usbstat); 2964 if (val & BIT(SUPER_SPEED)) { 2965 dev->gadget.speed = USB_SPEED_SUPER; 2966 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 2967 EP0_SS_MAX_PACKET_SIZE); 2968 } else if (val & BIT(HIGH_SPEED)) { 2969 dev->gadget.speed = USB_SPEED_HIGH; 2970 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 2971 EP0_HS_MAX_PACKET_SIZE); 2972 } else { 2973 dev->gadget.speed = USB_SPEED_FULL; 2974 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 2975 EP0_HS_MAX_PACKET_SIZE); 2976 } 2977 net2280_led_speed(dev, dev->gadget.speed); 2978 ep_dbg(dev, "%s\n", 2979 usb_speed_string(dev->gadget.speed)); 2980 } 2981 2982 ep = &dev->ep[0]; 2983 ep->irqs++; 2984 2985 /* make sure any leftover request state is cleared */ 2986 stat &= ~BIT(ENDPOINT_0_INTERRUPT); 2987 while (!list_empty(&ep->queue)) { 2988 req = list_entry(ep->queue.next, 2989 struct net2280_request, queue); 2990 done(ep, req, (req->req.actual == req->req.length) 2991 ? 0 : -EPROTO); 2992 } 2993 ep->stopped = 0; 2994 dev->protocol_stall = 0; 2995 if (!(dev->quirks & PLX_SUPERSPEED)) { 2996 if (ep->dev->quirks & PLX_2280) 2997 tmp = BIT(FIFO_OVERFLOW) | 2998 BIT(FIFO_UNDERFLOW); 2999 else 3000 tmp = 0; 3001 3002 writel(tmp | BIT(TIMEOUT) | 3003 BIT(USB_STALL_SENT) | 3004 BIT(USB_IN_NAK_SENT) | 3005 BIT(USB_IN_ACK_RCVD) | 3006 BIT(USB_OUT_PING_NAK_SENT) | 3007 BIT(USB_OUT_ACK_SENT) | 3008 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) | 3009 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) | 3010 BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 3011 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 3012 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3013 BIT(DATA_IN_TOKEN_INTERRUPT), 3014 &ep->regs->ep_stat); 3015 } 3016 u.raw[0] = readl(&dev->usb->setup0123); 3017 u.raw[1] = readl(&dev->usb->setup4567); 3018 3019 cpu_to_le32s(&u.raw[0]); 3020 cpu_to_le32s(&u.raw[1]); 3021 3022 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched) 3023 defect7374_workaround(dev, u.r); 3024 3025 tmp = 0; 3026 3027 #define w_value le16_to_cpu(u.r.wValue) 3028 #define w_index le16_to_cpu(u.r.wIndex) 3029 #define w_length le16_to_cpu(u.r.wLength) 3030 3031 /* ack the irq */ 3032 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0); 3033 stat ^= BIT(SETUP_PACKET_INTERRUPT); 3034 3035 /* watch control traffic at the token level, and force 3036 * synchronization before letting the status stage happen. 3037 * FIXME ignore tokens we'll NAK, until driver responds. 3038 * that'll mean a lot less irqs for some drivers. 3039 */ 3040 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; 3041 if (ep->is_in) { 3042 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 3043 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3044 BIT(DATA_IN_TOKEN_INTERRUPT); 3045 stop_out_naking(ep); 3046 } else 3047 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 3048 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3049 BIT(DATA_IN_TOKEN_INTERRUPT); 3050 writel(scratch, &dev->epregs[0].ep_irqenb); 3051 3052 /* we made the hardware handle most lowlevel requests; 3053 * everything else goes uplevel to the gadget code. 3054 */ 3055 ep->responded = 1; 3056 3057 if (dev->gadget.speed == USB_SPEED_SUPER) { 3058 handle_stat0_irqs_superspeed(dev, ep, u.r); 3059 goto next_endpoints; 3060 } 3061 3062 switch (u.r.bRequest) { 3063 case USB_REQ_GET_STATUS: { 3064 struct net2280_ep *e; 3065 __le32 status; 3066 3067 /* hw handles device and interface status */ 3068 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT)) 3069 goto delegate; 3070 e = get_ep_by_addr(dev, w_index); 3071 if (!e || w_length > 2) 3072 goto do_stall; 3073 3074 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT)) 3075 status = cpu_to_le32(1); 3076 else 3077 status = cpu_to_le32(0); 3078 3079 /* don't bother with a request object! */ 3080 writel(0, &dev->epregs[0].ep_irqenb); 3081 set_fifo_bytecount(ep, w_length); 3082 writel((__force u32)status, &dev->epregs[0].ep_data); 3083 allow_status(ep); 3084 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status); 3085 goto next_endpoints; 3086 } 3087 break; 3088 case USB_REQ_CLEAR_FEATURE: { 3089 struct net2280_ep *e; 3090 3091 /* hw handles device features */ 3092 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3093 goto delegate; 3094 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3095 goto do_stall; 3096 e = get_ep_by_addr(dev, w_index); 3097 if (!e) 3098 goto do_stall; 3099 if (e->wedged) { 3100 ep_vdbg(dev, "%s wedged, halt not cleared\n", 3101 ep->ep.name); 3102 } else { 3103 ep_vdbg(dev, "%s clear halt\n", e->ep.name); 3104 clear_halt(e); 3105 if ((ep->dev->quirks & PLX_SUPERSPEED) && 3106 !list_empty(&e->queue) && e->td_dma) 3107 restart_dma(e); 3108 } 3109 allow_status(ep); 3110 goto next_endpoints; 3111 } 3112 break; 3113 case USB_REQ_SET_FEATURE: { 3114 struct net2280_ep *e; 3115 3116 /* hw handles device features */ 3117 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 3118 goto delegate; 3119 if (w_value != USB_ENDPOINT_HALT || w_length != 0) 3120 goto do_stall; 3121 e = get_ep_by_addr(dev, w_index); 3122 if (!e) 3123 goto do_stall; 3124 if (e->ep.name == ep0name) 3125 goto do_stall; 3126 set_halt(e); 3127 if ((dev->quirks & PLX_SUPERSPEED) && e->dma) 3128 abort_dma(e); 3129 allow_status(ep); 3130 ep_vdbg(dev, "%s set halt\n", ep->ep.name); 3131 goto next_endpoints; 3132 } 3133 break; 3134 default: 3135 delegate: 3136 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x " 3137 "ep_cfg %08x\n", 3138 u.r.bRequestType, u.r.bRequest, 3139 w_value, w_index, w_length, 3140 readl(&ep->cfg->ep_cfg)); 3141 ep->responded = 0; 3142 spin_unlock(&dev->lock); 3143 tmp = dev->driver->setup(&dev->gadget, &u.r); 3144 spin_lock(&dev->lock); 3145 } 3146 3147 /* stall ep0 on error */ 3148 if (tmp < 0) { 3149 do_stall: 3150 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", 3151 u.r.bRequestType, u.r.bRequest, tmp); 3152 dev->protocol_stall = 1; 3153 } 3154 3155 /* some in/out token irq should follow; maybe stall then. 3156 * driver must queue a request (even zlp) or halt ep0 3157 * before the host times out. 3158 */ 3159 } 3160 3161 #undef w_value 3162 #undef w_index 3163 #undef w_length 3164 3165 next_endpoints: 3166 if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) { 3167 u32 mask = (BIT(ENDPOINT_0_INTERRUPT) | 3168 USB3380_IRQSTAT0_EP_INTR_MASK_IN | 3169 USB3380_IRQSTAT0_EP_INTR_MASK_OUT); 3170 3171 if (stat & mask) { 3172 usb338x_handle_ep_intr(dev, stat & mask); 3173 stat &= ~mask; 3174 } 3175 } else { 3176 /* endpoint data irq ? */ 3177 scratch = stat & 0x7f; 3178 stat &= ~0x7f; 3179 for (num = 0; scratch; num++) { 3180 u32 t; 3181 3182 /* do this endpoint's FIFO and queue need tending? */ 3183 t = BIT(num); 3184 if ((scratch & t) == 0) 3185 continue; 3186 scratch ^= t; 3187 3188 ep = &dev->ep[num]; 3189 handle_ep_small(ep); 3190 } 3191 } 3192 3193 if (stat) 3194 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat); 3195 } 3196 3197 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \ 3198 BIT(DMA_C_INTERRUPT) | \ 3199 BIT(DMA_B_INTERRUPT) | \ 3200 BIT(DMA_A_INTERRUPT)) 3201 #define PCI_ERROR_INTERRUPTS ( \ 3202 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \ 3203 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \ 3204 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3205 3206 static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3207 __releases(dev->lock) 3208 __acquires(dev->lock) 3209 { 3210 struct net2280_ep *ep; 3211 u32 tmp, num, mask, scratch; 3212 3213 /* after disconnect there's nothing else to do! */ 3214 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT); 3215 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED); 3216 3217 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. 3218 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and 3219 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT 3220 * only indicates a change in the reset state). 3221 */ 3222 if (stat & tmp) { 3223 bool reset = false; 3224 bool disconnect = false; 3225 3226 /* 3227 * Ignore disconnects and resets if the speed hasn't been set. 3228 * VBUS can bounce and there's always an initial reset. 3229 */ 3230 writel(tmp, &dev->regs->irqstat1); 3231 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 3232 if ((stat & BIT(VBUS_INTERRUPT)) && 3233 (readl(&dev->usb->usbctl) & 3234 BIT(VBUS_PIN)) == 0) { 3235 disconnect = true; 3236 ep_dbg(dev, "disconnect %s\n", 3237 dev->driver->driver.name); 3238 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && 3239 (readl(&dev->usb->usbstat) & mask) 3240 == 0) { 3241 reset = true; 3242 ep_dbg(dev, "reset %s\n", 3243 dev->driver->driver.name); 3244 } 3245 3246 if (disconnect || reset) { 3247 stop_activity(dev, dev->driver); 3248 ep0_start(dev); 3249 spin_unlock(&dev->lock); 3250 if (reset) 3251 usb_gadget_udc_reset 3252 (&dev->gadget, dev->driver); 3253 else 3254 (dev->driver->disconnect) 3255 (&dev->gadget); 3256 spin_lock(&dev->lock); 3257 return; 3258 } 3259 } 3260 stat &= ~tmp; 3261 3262 /* vBUS can bounce ... one of many reasons to ignore the 3263 * notion of hotplug events on bus connect/disconnect! 3264 */ 3265 if (!stat) 3266 return; 3267 } 3268 3269 /* NOTE: chip stays in PCI D0 state for now, but it could 3270 * enter D1 to save more power 3271 */ 3272 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3273 if (stat & tmp) { 3274 writel(tmp, &dev->regs->irqstat1); 3275 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3276 if (dev->driver->suspend) 3277 dev->driver->suspend(&dev->gadget); 3278 if (!enable_suspend) 3279 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT); 3280 } else { 3281 if (dev->driver->resume) 3282 dev->driver->resume(&dev->gadget); 3283 /* at high speed, note erratum 0133 */ 3284 } 3285 stat &= ~tmp; 3286 } 3287 3288 /* clear any other status/irqs */ 3289 if (stat) 3290 writel(stat, &dev->regs->irqstat1); 3291 3292 /* some status we can just ignore */ 3293 if (dev->quirks & PLX_2280) 3294 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3295 BIT(SUSPEND_REQUEST_INTERRUPT) | 3296 BIT(RESUME_INTERRUPT) | 3297 BIT(SOF_INTERRUPT)); 3298 else 3299 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) | 3300 BIT(RESUME_INTERRUPT) | 3301 BIT(SOF_DOWN_INTERRUPT) | 3302 BIT(SOF_INTERRUPT)); 3303 3304 if (!stat) 3305 return; 3306 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/ 3307 3308 /* DMA status, for ep-{a,b,c,d} */ 3309 scratch = stat & DMA_INTERRUPTS; 3310 stat &= ~DMA_INTERRUPTS; 3311 scratch >>= 9; 3312 for (num = 0; scratch; num++) { 3313 struct net2280_dma_regs __iomem *dma; 3314 3315 tmp = BIT(num); 3316 if ((tmp & scratch) == 0) 3317 continue; 3318 scratch ^= tmp; 3319 3320 ep = &dev->ep[num + 1]; 3321 dma = ep->dma; 3322 3323 if (!dma) 3324 continue; 3325 3326 /* clear ep's dma status */ 3327 tmp = readl(&dma->dmastat); 3328 writel(tmp, &dma->dmastat); 3329 3330 /* dma sync*/ 3331 if (dev->quirks & PLX_SUPERSPEED) { 3332 u32 r_dmacount = readl(&dma->dmacount); 3333 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && 3334 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) 3335 continue; 3336 } 3337 3338 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) { 3339 ep_dbg(ep->dev, "%s no xact done? %08x\n", 3340 ep->ep.name, tmp); 3341 continue; 3342 } 3343 stop_dma(ep->dma); 3344 3345 /* OUT transfers terminate when the data from the 3346 * host is in our memory. Process whatever's done. 3347 * On this path, we know transfer's last packet wasn't 3348 * less than req->length. NAK_OUT_PACKETS may be set, 3349 * or the FIFO may already be holding new packets. 3350 * 3351 * IN transfers can linger in the FIFO for a very 3352 * long time ... we ignore that for now, accounting 3353 * precisely (like PIO does) needs per-packet irqs 3354 */ 3355 scan_dma_completions(ep); 3356 3357 /* disable dma on inactive queues; else maybe restart */ 3358 if (!list_empty(&ep->queue)) { 3359 tmp = readl(&dma->dmactl); 3360 restart_dma(ep); 3361 } 3362 ep->irqs++; 3363 } 3364 3365 /* NOTE: there are other PCI errors we might usefully notice. 3366 * if they appear very often, here's where to try recovering. 3367 */ 3368 if (stat & PCI_ERROR_INTERRUPTS) { 3369 ep_err(dev, "pci dma error; stat %08x\n", stat); 3370 stat &= ~PCI_ERROR_INTERRUPTS; 3371 /* these are fatal errors, but "maybe" they won't 3372 * happen again ... 3373 */ 3374 stop_activity(dev, dev->driver); 3375 ep0_start(dev); 3376 stat = 0; 3377 } 3378 3379 if (stat) 3380 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat); 3381 } 3382 3383 static irqreturn_t net2280_irq(int irq, void *_dev) 3384 { 3385 struct net2280 *dev = _dev; 3386 3387 /* shared interrupt, not ours */ 3388 if ((dev->quirks & PLX_LEGACY) && 3389 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED)))) 3390 return IRQ_NONE; 3391 3392 spin_lock(&dev->lock); 3393 3394 /* handle disconnect, dma, and more */ 3395 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1)); 3396 3397 /* control requests and PIO */ 3398 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0)); 3399 3400 if (dev->quirks & PLX_SUPERSPEED) { 3401 /* re-enable interrupt to trigger any possible new interrupt */ 3402 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); 3403 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); 3404 writel(pciirqenb1, &dev->regs->pciirqenb1); 3405 } 3406 3407 spin_unlock(&dev->lock); 3408 3409 return IRQ_HANDLED; 3410 } 3411 3412 /*-------------------------------------------------------------------------*/ 3413 3414 static void gadget_release(struct device *_dev) 3415 { 3416 struct net2280 *dev = dev_get_drvdata(_dev); 3417 3418 kfree(dev); 3419 } 3420 3421 /* tear down the binding between this driver and the pci device */ 3422 3423 static void net2280_remove(struct pci_dev *pdev) 3424 { 3425 struct net2280 *dev = pci_get_drvdata(pdev); 3426 3427 usb_del_gadget_udc(&dev->gadget); 3428 3429 BUG_ON(dev->driver); 3430 3431 /* then clean up the resources we allocated during probe() */ 3432 net2280_led_shutdown(dev); 3433 if (dev->requests) { 3434 int i; 3435 for (i = 1; i < 5; i++) { 3436 if (!dev->ep[i].dummy) 3437 continue; 3438 pci_pool_free(dev->requests, dev->ep[i].dummy, 3439 dev->ep[i].td_dma); 3440 } 3441 pci_pool_destroy(dev->requests); 3442 } 3443 if (dev->got_irq) 3444 free_irq(pdev->irq, dev); 3445 if (dev->quirks & PLX_SUPERSPEED) 3446 pci_disable_msi(pdev); 3447 if (dev->regs) 3448 iounmap(dev->regs); 3449 if (dev->region) 3450 release_mem_region(pci_resource_start(pdev, 0), 3451 pci_resource_len(pdev, 0)); 3452 if (dev->enabled) 3453 pci_disable_device(pdev); 3454 device_remove_file(&pdev->dev, &dev_attr_registers); 3455 3456 ep_info(dev, "unbind\n"); 3457 } 3458 3459 /* wrap this driver around the specified device, but 3460 * don't respond over USB until a gadget driver binds to us. 3461 */ 3462 3463 static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3464 { 3465 struct net2280 *dev; 3466 unsigned long resource, len; 3467 void __iomem *base = NULL; 3468 int retval, i; 3469 3470 /* alloc, and start init */ 3471 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3472 if (dev == NULL) { 3473 retval = -ENOMEM; 3474 goto done; 3475 } 3476 3477 pci_set_drvdata(pdev, dev); 3478 spin_lock_init(&dev->lock); 3479 dev->quirks = id->driver_data; 3480 dev->pdev = pdev; 3481 dev->gadget.ops = &net2280_ops; 3482 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ? 3483 USB_SPEED_SUPER : USB_SPEED_HIGH; 3484 3485 /* the "gadget" abstracts/virtualizes the controller */ 3486 dev->gadget.name = driver_name; 3487 3488 /* now all the pci goodies ... */ 3489 if (pci_enable_device(pdev) < 0) { 3490 retval = -ENODEV; 3491 goto done; 3492 } 3493 dev->enabled = 1; 3494 3495 /* BAR 0 holds all the registers 3496 * BAR 1 is 8051 memory; unused here (note erratum 0103) 3497 * BAR 2 is fifo memory; unused here 3498 */ 3499 resource = pci_resource_start(pdev, 0); 3500 len = pci_resource_len(pdev, 0); 3501 if (!request_mem_region(resource, len, driver_name)) { 3502 ep_dbg(dev, "controller already in use\n"); 3503 retval = -EBUSY; 3504 goto done; 3505 } 3506 dev->region = 1; 3507 3508 /* FIXME provide firmware download interface to put 3509 * 8051 code into the chip, e.g. to turn on PCI PM. 3510 */ 3511 3512 base = ioremap_nocache(resource, len); 3513 if (base == NULL) { 3514 ep_dbg(dev, "can't map memory\n"); 3515 retval = -EFAULT; 3516 goto done; 3517 } 3518 dev->regs = (struct net2280_regs __iomem *) base; 3519 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080); 3520 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100); 3521 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180); 3522 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); 3523 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); 3524 3525 if (dev->quirks & PLX_SUPERSPEED) { 3526 u32 fsmvalue; 3527 u32 usbstat; 3528 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *) 3529 (base + 0x00b4); 3530 dev->llregs = (struct usb338x_ll_regs __iomem *) 3531 (base + 0x0700); 3532 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *) 3533 (base + 0x0748); 3534 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *) 3535 (base + 0x077c); 3536 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *) 3537 (base + 0x079c); 3538 dev->plregs = (struct usb338x_pl_regs __iomem *) 3539 (base + 0x0800); 3540 usbstat = readl(&dev->usb->usbstat); 3541 dev->enhanced_mode = !!(usbstat & BIT(11)); 3542 dev->n_ep = (dev->enhanced_mode) ? 9 : 5; 3543 /* put into initial config, link up all endpoints */ 3544 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 3545 (0xf << DEFECT7374_FSM_FIELD); 3546 /* See if firmware needs to set up for workaround: */ 3547 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) { 3548 dev->bug7734_patched = 1; 3549 writel(0, &dev->usb->usbctl); 3550 } else 3551 dev->bug7734_patched = 0; 3552 } else { 3553 dev->enhanced_mode = 0; 3554 dev->n_ep = 7; 3555 /* put into initial config, link up all endpoints */ 3556 writel(0, &dev->usb->usbctl); 3557 } 3558 3559 usb_reset(dev); 3560 usb_reinit(dev); 3561 3562 /* irq setup after old hardware is cleaned up */ 3563 if (!pdev->irq) { 3564 ep_err(dev, "No IRQ. Check PCI setup!\n"); 3565 retval = -ENODEV; 3566 goto done; 3567 } 3568 3569 if (dev->quirks & PLX_SUPERSPEED) 3570 if (pci_enable_msi(pdev)) 3571 ep_err(dev, "Failed to enable MSI mode\n"); 3572 3573 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED, 3574 driver_name, dev)) { 3575 ep_err(dev, "request interrupt %d failed\n", pdev->irq); 3576 retval = -EBUSY; 3577 goto done; 3578 } 3579 dev->got_irq = 1; 3580 3581 /* DMA setup */ 3582 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */ 3583 dev->requests = pci_pool_create("requests", pdev, 3584 sizeof(struct net2280_dma), 3585 0 /* no alignment requirements */, 3586 0 /* or page-crossing issues */); 3587 if (!dev->requests) { 3588 ep_dbg(dev, "can't get request pool\n"); 3589 retval = -ENOMEM; 3590 goto done; 3591 } 3592 for (i = 1; i < 5; i++) { 3593 struct net2280_dma *td; 3594 3595 td = pci_pool_alloc(dev->requests, GFP_KERNEL, 3596 &dev->ep[i].td_dma); 3597 if (!td) { 3598 ep_dbg(dev, "can't get dummy %d\n", i); 3599 retval = -ENOMEM; 3600 goto done; 3601 } 3602 td->dmacount = 0; /* not VALID */ 3603 td->dmadesc = td->dmaaddr; 3604 dev->ep[i].dummy = td; 3605 } 3606 3607 /* enable lower-overhead pci memory bursts during DMA */ 3608 if (dev->quirks & PLX_LEGACY) 3609 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) | 3610 /* 3611 * 256 write retries may not be enough... 3612 BIT(PCI_RETRY_ABORT_ENABLE) | 3613 */ 3614 BIT(DMA_READ_MULTIPLE_ENABLE) | 3615 BIT(DMA_READ_LINE_ENABLE), 3616 &dev->pci->pcimstctl); 3617 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */ 3618 pci_set_master(pdev); 3619 pci_try_set_mwi(pdev); 3620 3621 /* ... also flushes any posted pci writes */ 3622 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff; 3623 3624 /* done */ 3625 ep_info(dev, "%s\n", driver_desc); 3626 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n", 3627 pdev->irq, base, dev->chiprev); 3628 ep_info(dev, "version: " DRIVER_VERSION "; %s\n", 3629 dev->enhanced_mode ? "enhanced mode" : "legacy mode"); 3630 retval = device_create_file(&pdev->dev, &dev_attr_registers); 3631 if (retval) 3632 goto done; 3633 3634 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 3635 gadget_release); 3636 if (retval) 3637 goto done; 3638 return 0; 3639 3640 done: 3641 if (dev) 3642 net2280_remove(pdev); 3643 return retval; 3644 } 3645 3646 /* make sure the board is quiescent; otherwise it will continue 3647 * generating IRQs across the upcoming reboot. 3648 */ 3649 3650 static void net2280_shutdown(struct pci_dev *pdev) 3651 { 3652 struct net2280 *dev = pci_get_drvdata(pdev); 3653 3654 /* disable IRQs */ 3655 writel(0, &dev->regs->pciirqenb0); 3656 writel(0, &dev->regs->pciirqenb1); 3657 3658 /* disable the pullup so the host will think we're gone */ 3659 writel(0, &dev->usb->usbctl); 3660 3661 } 3662 3663 3664 /*-------------------------------------------------------------------------*/ 3665 3666 static const struct pci_device_id pci_ids[] = { { 3667 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3668 .class_mask = ~0, 3669 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3670 .device = 0x2280, 3671 .subvendor = PCI_ANY_ID, 3672 .subdevice = PCI_ANY_ID, 3673 .driver_data = PLX_LEGACY | PLX_2280, 3674 }, { 3675 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3676 .class_mask = ~0, 3677 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3678 .device = 0x2282, 3679 .subvendor = PCI_ANY_ID, 3680 .subdevice = PCI_ANY_ID, 3681 .driver_data = PLX_LEGACY, 3682 }, 3683 { 3684 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3685 .class_mask = ~0, 3686 .vendor = PCI_VENDOR_ID_PLX, 3687 .device = 0x3380, 3688 .subvendor = PCI_ANY_ID, 3689 .subdevice = PCI_ANY_ID, 3690 .driver_data = PLX_SUPERSPEED, 3691 }, 3692 { 3693 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3694 .class_mask = ~0, 3695 .vendor = PCI_VENDOR_ID_PLX, 3696 .device = 0x3382, 3697 .subvendor = PCI_ANY_ID, 3698 .subdevice = PCI_ANY_ID, 3699 .driver_data = PLX_SUPERSPEED, 3700 }, 3701 { /* end: all zeroes */ } 3702 }; 3703 MODULE_DEVICE_TABLE(pci, pci_ids); 3704 3705 /* pci driver glue; this is a "new style" PCI driver module */ 3706 static struct pci_driver net2280_pci_driver = { 3707 .name = (char *) driver_name, 3708 .id_table = pci_ids, 3709 3710 .probe = net2280_probe, 3711 .remove = net2280_remove, 3712 .shutdown = net2280_shutdown, 3713 3714 /* FIXME add power management support */ 3715 }; 3716 3717 module_pci_driver(net2280_pci_driver); 3718 3719 MODULE_DESCRIPTION(DRIVER_DESC); 3720 MODULE_AUTHOR("David Brownell"); 3721 MODULE_LICENSE("GPL"); 3722