// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for PLX NET2272 USB device controller
 *
 * Copyright (C) 2005-2006 PLX Technology, Inc.
 * Copyright (C) 2006-2011 Analog Devices, Inc.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>

#include "net2272.h"

#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"

static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;

static const char ep0name[] = "ep0";
static const char * const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c",
};

#ifdef CONFIG_USB_NET2272_DMA
/*
 * use_dma: the NET2272 can use an external DMA controller.
 * Note that since there is no generic DMA api, some functions,
 * notably request_dma, start_dma, and cancel_dma will need to be
 * modified for your platform's particular dma controller.
 *
 * If use_dma is disabled, pio will be used instead.
 */
static bool use_dma = false;
module_param(use_dma, bool, 0644);

/*
 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
 * The NET2272 can only use dma for a single endpoint at a time.
 * At some point this could be modified to allow either endpoint
 * to take control of dma as it becomes available.
 *
 * Note that DMA should not be used on OUT endpoints unless it can
 * be guaranteed that no short packets will arrive on an IN endpoint
 * while the DMA operation is pending.  Otherwise the OUT DMA will
 * terminate prematurely (See NET2272 Errata 630-0213-0101)
 */
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);

/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
 *	mode 0 == Slow DREQ mode
 *	mode 1 == Fast DREQ mode
 *	mode 2 == Burst mode
 */
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);
#else
#define use_dma 0
#define dma_ep 1
#define dma_mode 2
#endif

/*
 * fifo_mode: net2272 buffer configuration:
 *	mode 0 == ep-{a,b,c} 512db each
 *	mode 1 == ep-a 1k, ep-{b,c} 512db
 *	mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 *	mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 */
static ushort fifo_mode;
module_param(fifo_mode, ushort, 0644);

/*
 * enable_suspend: When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus powered devices set this to 1.
 */
static ushort enable_suspend;
module_param(enable_suspend, ushort, 0644);
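
/*
 * Example (assuming the driver is built as a module, with
 * CONFIG_USB_NET2272_DMA enabled for the dma parameters):
 *
 *	modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=2 enable_suspend=1
 *
 * All parameters above use permission 0644, so they also appear as
 * writable attributes under /sys/module/net2272/parameters/.
 */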

static void assert_out_naking(struct net2272_ep *ep, const char *where)
{
	u8 tmp;

#ifndef DEBUG
	return;
#endif

	tmp = net2272_ep_read(ep, EP_STAT0);
	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
			ep->ep.name, where, tmp);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
	}
}
#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)

static void stop_out_naking(struct net2272_ep *ep)
{
	u8 tmp = net2272_ep_read(ep, EP_STAT0);

	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
}

#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK: return "bulk";
	case USB_ENDPOINT_XFER_ISOC: return "iso";
	case USB_ENDPOINT_XFER_INT:  return "intr";
	default:                     return "control";
	}
}

static char *buf_state_string(unsigned state)
{
	switch (state) {
	case BUFF_FREE:  return "free";
	case BUFF_VALID: return "valid";
	case BUFF_LCL:   return "local";
	case BUFF_USB:   return "usb";
	default:         return "unknown";
	}
}

static char *dma_mode_string(void)
{
	if (!use_dma)
		return "PIO";
	switch (dma_mode) {
	case 0:  return "SLOW DREQ";
	case 1:  return "FAST DREQ";
	case 2:  return "BURST";
	default: return "invalid";
	}
}

static void net2272_dequeue_all(struct net2272_ep *);
static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
static int net2272_fifo_status(struct usb_ep *);

static const struct usb_ep_ops net2272_ep_ops;

/*---------------------------------------------------------------------------*/

static int
net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2272 *dev;
	struct net2272_ep *ep;
	u32 max;
	u8 tmp;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	max = usb_endpoint_maxp(desc);

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max;
	ep->desc = desc;

	/* net2272_ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;

	/* set speed-dependent max packet */
	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);

	/* set type, direction, address; reset fifo counters */
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
	tmp = usb_endpoint_type(desc);
	if (usb_endpoint_xfer_bulk(desc)) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
	tmp <<= ENDPOINT_TYPE;
	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
	tmp |= (1 << ENDPOINT_ENABLE);

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (!ep->is_in)
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	net2272_ep_write(ep, EP_CFG, tmp);

	/* enable irqs */
	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
	net2272_write(dev, IRQENB0, tmp);

	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB);
	net2272_ep_write(ep, EP_IRQENB, tmp);

	tmp = desc->bEndpointAddress;
	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
		type_string(desc->bmAttributes), max,
		net2272_ep_read(ep, EP_CFG));

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

static void net2272_ep_reset(struct net2272_ep *ep)
{
	u8 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2272_ep_ops;

	/* disable irqs, endpoint */
	net2272_ep_write(ep, EP_IRQENB, 0);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read.
	 */
	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
	net2272_ep_write(ep, EP_RSPSET, tmp);

	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
	if (ep->num != 0)
		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);

	net2272_ep_write(ep, EP_RSPCLR, tmp);

	/* scrub most status bits, and flush any fifo state */
	net2272_ep_write(ep, EP_STAT0,
		  (1 << DATA_IN_TOKEN_INTERRUPT)
		| (1 << DATA_OUT_TOKEN_INTERRUPT)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));

	net2272_ep_write(ep, EP_STAT1,
		  (1 << TIMEOUT)
		| (1 << USB_OUT_ACK_SENT)
		| (1 << USB_OUT_NAK_SENT)
		| (1 << USB_IN_ACK_RCVD)
		| (1 << USB_IN_NAK_SENT)
		| (1 << USB_STALL_SENT)
		| (1 << LOCAL_OUT_ZLP)
		| (1 << BUFFER_FLUSH));

	/* fifo size is handled separately */
}

static int net2272_disable(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	net2272_dequeue_all(ep);
	net2272_ep_reset(ep);

	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*---------------------------------------------------------------------------*/

static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2272_request *req;

	if (!_ep)
		return NULL;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_request *req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2272_request, req);
	WARN_ON(!list_empty(&req->queue));

	kfree(req);
}
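
/*
 * Completion helper: removes the request from the endpoint queue,
 * records its final status, unmaps any DMA mapping, and invokes the
 * gadget driver's completion callback with dev->lock temporarily
 * dropped (ep->stopped is set so the callback cannot re-enter queue
 * processing on this endpoint).
 */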
static void
net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
{
	struct net2272 *dev;
	unsigned stopped = ep->stopped;

	if (ep->num == 0) {
		if (ep->dev->protocol_stall) {
			ep->stopped = 1;
			set_halt(ep);
		}
		allow_status(ep);
	}

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (use_dma && ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req,
				ep->is_in);

	if (status && status != -ESHUTDOWN)
		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length, req->req.buf);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

static int
net2272_write_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned max)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	u16 *bufp;
	unsigned length, count;
	u8 tmp;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
		ep->ep.name, req, max, length,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	count = length;
	bufp = (u16 *)buf;

	while (likely(count >= 2)) {
		/* no byte-swap required; chip endian set during init */
		writew(*bufp++, ep_data);
		count -= 2;
	}
	buf = (u8 *)bufp;

	/* write final byte by placing the NET2272 into 8-bit mode */
	if (unlikely(count)) {
		tmp = net2272_read(ep->dev, LOCCTL);
		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
		writeb(*buf, ep_data);
		net2272_write(ep->dev, LOCCTL, tmp);
	}
	return length;
}

/* returns: 0: still running, 1: completed, negative: errno */
static int
net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned count, max;
	int status;

	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

	/*
	 * Keep loading the endpoint until the final packet is loaded,
	 * or the endpoint buffer is full.
	 */
 top:
	/*
	 * Clear interrupt status
	 *  - Packet Transmitted interrupt will become set again when the
	 *    host successfully takes another packet
	 */
	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
		buf = req->req.buf + req->req.actual;
		prefetch(buf);

		/* force pagesel */
		net2272_ep_read(ep, EP_STAT0);

		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
			(net2272_ep_read(ep, EP_AVAIL0));

		if (max < ep->ep.maxpacket)
			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
				| (net2272_ep_read(ep, EP_AVAIL0));

		count = net2272_write_packet(ep, buf, req, max);
		/* see if we are done */
		if (req->req.length == req->req.actual) {
			/* validate short or zlp packet */
			if (count < ep->ep.maxpacket)
				set_fifo_bytecount(ep, 0);
			net2272_done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct net2272_request,
						queue);
				status = net2272_kick_dma(ep, req);

				if (status < 0)
					if ((net2272_ep_read(ep, EP_STAT0)
							& (1 << BUFFER_EMPTY)))
						goto top;
			}
			return 1;
		}
		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	}
	return 0;
}

static void
net2272_out_flush(struct net2272_ep *ep)
{
	ASSERT_OUT_NAKING(ep);

	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}

static int
net2272_read_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned avail)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	unsigned is_short;
	u16 *bufp;

	req->req.actual += avail;

	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
		ep->ep.name, req, avail,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	is_short = (avail < ep->ep.maxpacket);

	if (unlikely(avail == 0)) {
		/* remove any zlp from the buffer */
		(void)readw(ep_data);
		return is_short;
	}

	/* Ensure we get the final byte */
	if (unlikely(avail % 2))
		avail++;
	bufp = (u16 *)buf;

	do {
		*bufp++ = readw(ep_data);
		avail -= 2;
	} while (avail);

	/*
	 * To avoid false endpoint available race condition must read
	 * ep stat0 twice in the case of a short transfer
	 */
	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
		net2272_ep_read(ep, EP_STAT0);

	return is_short;
}

static int
net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned is_short;
	int count;
	int tmp;
	int cleanup = 0;

	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

 top:
	do {
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);

		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
			| net2272_ep_read(ep, EP_AVAIL0);

		net2272_ep_write(ep, EP_STAT0,
			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
			(1 << DATA_PACKET_RECEIVED_INTERRUPT));

		tmp = req->req.length - req->req.actual;

		if (count > tmp) {
			if ((tmp % ep->ep.maxpacket) != 0) {
				dev_err(ep->dev->dev,
					"%s out fifo %d bytes, expected %d\n",
					ep->ep.name, count, tmp);
				cleanup = 1;
			}
			count = (tmp > 0) ? tmp : 0;
		}

		is_short = net2272_read_packet(ep, buf, req, count);

		/* completion */
		if (unlikely(cleanup || is_short ||
				req->req.actual == req->req.length)) {

			if (cleanup) {
				net2272_out_flush(ep);
				net2272_done(ep, req, -EOVERFLOW);
			} else
				net2272_done(ep, req, 0);

			/* re-initialize endpoint transfer registers
			 * otherwise they may result in erroneous pre-validation
			 * for subsequent control reads
			 */
			if (unlikely(ep->num == 0)) {
				net2272_ep_write(ep, EP_TRANSFER2, 0);
				net2272_ep_write(ep, EP_TRANSFER1, 0);
				net2272_ep_write(ep, EP_TRANSFER0, 0);
			}

			if (!list_empty(&ep->queue)) {
				int status;

				req = list_entry(ep->queue.next,
					struct net2272_request, queue);
				status = net2272_kick_dma(ep, req);
				if ((status < 0) &&
				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
					goto top;
			}
			return 1;
		}
	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));

	return 0;
}

static void
net2272_pio_advance(struct net2272_ep *ep)
{
	struct net2272_request *req;

	if (unlikely(list_empty(&ep->queue)))
		return;

	req = list_entry(ep->queue.next, struct net2272_request, queue);
	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
}

/* returns 0 on success, else negative errno */
static int
net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
	unsigned len, unsigned dir)
{
	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
		ep, buf, len, dir);

	/* The NET2272 only supports a single dma channel */
	if (dev->dma_busy)
		return -EBUSY;
	/*
	 * EP_TRANSFER (used to determine the number of bytes received
	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
	 */
	if ((dir == 1) && (len > 0x1000000))
		return -EINVAL;

	dev->dma_busy = 1;

	/* initialize platform's dma */
#ifdef CONFIG_USB_PCI
	/* NET2272 addr, buffer addr, length, etc. */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		/* Setup PLX 9054 DMA mode */
		writel((1 << LOCAL_BUS_WIDTH) |
			(1 << TA_READY_INPUT_ENABLE) |
			(0 << LOCAL_BURST_ENABLE) |
			(1 << DONE_INTERRUPT_ENABLE) |
			(1 << LOCAL_ADDRESSING_MODE) |
			(1 << DEMAND_MODE) |
			(1 << DMA_EOT_ENABLE) |
			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
			dev->rdk1.plx9054_base_addr + DMAMODE0);

		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
		writel((dir << DIRECTION_OF_TRANSFER) |
			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
			dev->rdk1.plx9054_base_addr + DMADPR0);
		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
			readl(dev->rdk1.plx9054_base_addr + INTCSR),
			dev->rdk1.plx9054_base_addr + INTCSR);

		break;
	}
#endif

	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(1 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((ep >> 1) << DMA_ENDPOINT_SELECT));

	(void) net2272_read(dev, SCRATCH);

	return 0;
}

static void
net2272_start_dma(struct net2272 *dev)
{
	/* start platform's dma controller */
#ifdef CONFIG_USB_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif
}

/* returns 0 on success, else negative errno */
static int
net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
{
	unsigned size;
	u8 tmp;

	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
		return -EINVAL;

	/* don't use dma for odd-length transfers
	 * otherwise, we'd need to deal with the last byte with pio
	 */
	if (req->req.length & 1)
		return -EINVAL;

	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
		ep->ep.name, req, (unsigned long long) req->req.dma);

	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	/* The NET2272 can only use DMA on one endpoint at a time */
	if (ep->dev->dma_busy)
		return -EBUSY;

	/* Make sure we only DMA an even number of bytes (we'll use
	 * pio to complete the transfer)
	 */
	size = req->req.length;
	size &= ~1;

	/* device-to-host transfer */
	if (ep->is_in) {
		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;
		req->req.actual += size;

	/* host-to-device transfer */
	} else {
		tmp = net2272_ep_read(ep, EP_STAT0);

		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;

		if (!(tmp & (1 << BUFFER_EMPTY)))
			ep->not_empty = 1;
		else
			ep->not_empty = 0;


		/* allow the endpoint's buffer to fill */
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

		/* this transfer completed and data's already in the fifo
		 * return error so pio gets used.
		 */
		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {

			/* deassert dreq */
			net2272_write(ep->dev, DMAREQ,
				(0 << DMA_BUFFER_VALID) |
				(0 << DMA_REQUEST_ENABLE) |
				(1 << DMA_CONTROL_DACK) |
				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
				((ep->num >> 1) << DMA_ENDPOINT_SELECT));

			return -EBUSY;
		}
	}

	/* Don't use per-packet interrupts: use dma interrupts only */
	net2272_ep_write(ep, EP_IRQENB, 0);

	net2272_start_dma(ep->dev);

	return 0;
}

static void net2272_cancel_dma(struct net2272 *dev)
{
#ifdef CONFIG_USB_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
		         (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */

		/* dma abort generates an interrupt */
		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif

	dev->dma_busy = 0;
}

/*---------------------------------------------------------------------------*/

static int
net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2272_request *req;
	struct net2272_ep *ep;
	struct net2272 *dev;
	unsigned long flags;
	int status = -1;
	u8 s;

	req = container_of(_req, struct net2272_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))
		return -EINVAL;
	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* set up dma mapping in case the caller didn't */
	if (use_dma && ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (status)
			return status;
	}

	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
		_ep->name, _req, _req->length, _req->buf,
		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* maybe there's no control data, just status ack */
		if (ep->num == 0 && _req->length == 0) {
			net2272_done(ep, req, 0);
			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
			goto done;
		}

		/* Return zlp, don't let it block subsequent packets */
		s = net2272_ep_read(ep, EP_STAT0);
		if (s & (1 << BUFFER_EMPTY)) {
			/* Buffer is empty; check for a blocking zlp and handle it */
			if ((s & (1 << NAK_OUT_PACKETS)) &&
					net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
				/*
				 * Request is going to terminate with a short packet ...
				 * hope the client is ready for it!
				 */
				status = net2272_read_fifo(ep, req);
				/* clear short packet naking */
				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
				goto done;
			}
		}

		/* try dma first */
		status = net2272_kick_dma(ep, req);

		if (status < 0) {
			/* dma failed (most likely in use by another endpoint)
			 * fallback to pio
			 */
			status = 0;

			if (ep->is_in)
				status = net2272_write_fifo(ep, req);
			else {
				s = net2272_ep_read(ep, EP_STAT0);
				if ((s & (1 << BUFFER_EMPTY)) == 0)
					status = net2272_read_fifo(ep, req);
			}

			if (unlikely(status != 0)) {
				if (status > 0)
					status = 0;
				req = NULL;
			}
		}
	}
	if (likely(req))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue)))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 done:
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/* dequeue ALL requests */
static void
net2272_dequeue_all(struct net2272_ep *ep)
{
	struct net2272_request *req;

	/* called with spinlock held */
	ep->stopped = 1;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2272_request,
				queue);
		net2272_done(ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int
net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_ep *ep;
	struct net2272_request *req = NULL, *iter;
	unsigned long flags;
	int stopped;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;
	ep->stopped = 1;

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ep->stopped = stopped;
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete */
	if (ep->queue.next == &req->queue) {
		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
		net2272_done(ep, req, -ECONNRESET);
	}
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*---------------------------------------------------------------------------*/

static int
net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2272_ep *ep;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
		ret = -EAGAIN;
	else {
		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
			value ? "set" : "clear",
			wedged ? "wedge" : "halt");
"wedge" : "halt"); 991 /* set/clear */ 992 if (value) { 993 if (ep->num == 0) 994 ep->dev->protocol_stall = 1; 995 else 996 set_halt(ep); 997 if (wedged) 998 ep->wedged = 1; 999 } else { 1000 clear_halt(ep); 1001 ep->wedged = 0; 1002 } 1003 } 1004 spin_unlock_irqrestore(&ep->dev->lock, flags); 1005 1006 return ret; 1007 } 1008 1009 static int 1010 net2272_set_halt(struct usb_ep *_ep, int value) 1011 { 1012 return net2272_set_halt_and_wedge(_ep, value, 0); 1013 } 1014 1015 static int 1016 net2272_set_wedge(struct usb_ep *_ep) 1017 { 1018 if (!_ep || _ep->name == ep0name) 1019 return -EINVAL; 1020 return net2272_set_halt_and_wedge(_ep, 1, 1); 1021 } 1022 1023 static int 1024 net2272_fifo_status(struct usb_ep *_ep) 1025 { 1026 struct net2272_ep *ep; 1027 u16 avail; 1028 1029 ep = container_of(_ep, struct net2272_ep, ep); 1030 if (!_ep || (!ep->desc && ep->num != 0)) 1031 return -ENODEV; 1032 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1033 return -ESHUTDOWN; 1034 1035 avail = net2272_ep_read(ep, EP_AVAIL1) << 8; 1036 avail |= net2272_ep_read(ep, EP_AVAIL0); 1037 if (avail > ep->fifo_size) 1038 return -EOVERFLOW; 1039 if (ep->is_in) 1040 avail = ep->fifo_size - avail; 1041 return avail; 1042 } 1043 1044 static void 1045 net2272_fifo_flush(struct usb_ep *_ep) 1046 { 1047 struct net2272_ep *ep; 1048 1049 ep = container_of(_ep, struct net2272_ep, ep); 1050 if (!_ep || (!ep->desc && ep->num != 0)) 1051 return; 1052 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1053 return; 1054 1055 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 1056 } 1057 1058 static const struct usb_ep_ops net2272_ep_ops = { 1059 .enable = net2272_enable, 1060 .disable = net2272_disable, 1061 1062 .alloc_request = net2272_alloc_request, 1063 .free_request = net2272_free_request, 1064 1065 .queue = net2272_queue, 1066 .dequeue = net2272_dequeue, 1067 1068 .set_halt = net2272_set_halt, 1069 .set_wedge = net2272_set_wedge, 1070 .fifo_status = net2272_fifo_status, 1071 .fifo_flush = net2272_fifo_flush, 1072 }; 1073 1074 /*---------------------------------------------------------------------------*/ 1075 1076 static int 1077 net2272_get_frame(struct usb_gadget *_gadget) 1078 { 1079 struct net2272 *dev; 1080 unsigned long flags; 1081 u16 ret; 1082 1083 if (!_gadget) 1084 return -ENODEV; 1085 dev = container_of(_gadget, struct net2272, gadget); 1086 spin_lock_irqsave(&dev->lock, flags); 1087 1088 ret = net2272_read(dev, FRAME1) << 8; 1089 ret |= net2272_read(dev, FRAME0); 1090 1091 spin_unlock_irqrestore(&dev->lock, flags); 1092 return ret; 1093 } 1094 1095 static int 1096 net2272_wakeup(struct usb_gadget *_gadget) 1097 { 1098 struct net2272 *dev; 1099 u8 tmp; 1100 unsigned long flags; 1101 1102 if (!_gadget) 1103 return 0; 1104 dev = container_of(_gadget, struct net2272, gadget); 1105 1106 spin_lock_irqsave(&dev->lock, flags); 1107 tmp = net2272_read(dev, USBCTL0); 1108 if (tmp & (1 << IO_WAKEUP_ENABLE)) 1109 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME)); 1110 1111 spin_unlock_irqrestore(&dev->lock, flags); 1112 1113 return 0; 1114 } 1115 1116 static int 1117 net2272_set_selfpowered(struct usb_gadget *_gadget, int value) 1118 { 1119 if (!_gadget) 1120 return -ENODEV; 1121 1122 _gadget->is_selfpowered = (value != 0); 1123 1124 return 0; 1125 } 1126 1127 static int 1128 net2272_pullup(struct usb_gadget *_gadget, int is_on) 1129 { 1130 struct net2272 *dev; 1131 u8 tmp; 1132 unsigned long flags; 1133 1134 if (!_gadget) 1135 return -ENODEV; 1136 dev = container_of(_gadget, struct 

	spin_lock_irqsave(&dev->lock, flags);
	tmp = net2272_read(dev, USBCTL0);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= (1 << USB_DETECT_ENABLE);
	else
		tmp &= ~(1 << USB_DETECT_ENABLE);
	net2272_write(dev, USBCTL0, tmp);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2272_start(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver);
static int net2272_stop(struct usb_gadget *_gadget);
static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);

static const struct usb_gadget_ops net2272_ops = {
	.get_frame	= net2272_get_frame,
	.wakeup		= net2272_wakeup,
	.set_selfpowered = net2272_set_selfpowered,
	.pullup		= net2272_pullup,
	.udc_start	= net2272_start,
	.udc_stop	= net2272_stop,
	.udc_async_callbacks = net2272_async_callbacks,
};

/*---------------------------------------------------------------------------*/

static ssize_t
registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2272 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	u8 t1, t2;
	int i;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version %s,"
		"chiprev %02x, locctl %02x\n"
		"irqenb0 %02x irqenb1 %02x "
		"irqstat0 %02x irqstat1 %02x\n",
		driver_name, driver_vers, dev->chiprev,
		net2272_read(dev, LOCCTL),
		net2272_read(dev, IRQENB0),
		net2272_read(dev, IRQENB1),
		net2272_read(dev, IRQSTAT0),
		net2272_read(dev, IRQSTAT1));
	size -= t;
	next += t;

	/* DMA */
	t1 = net2272_read(dev, DMAREQ);
	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
		t1, ep_name[(t1 & 0x01) + 1],
		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
		t1 & (1 << DMA_REQUEST) ? "req " : "",
		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = net2272_read(dev, USBCTL1);
	if (t1 & (1 << VBUS_PIN)) {
		if (t1 & (1 << USB_HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
	} else
		s = "not attached";
	t = scnprintf(next, size,
		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
		net2272_read(dev, USBCTL0), t1,
		net2272_read(dev, OURADDR), s);
	size -= t;
	next += t;

	/* Endpoint Registers */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep;

		ep = &dev->ep[i];
		if (i && !ep->desc)
			continue;

		t1 = net2272_ep_read(ep, EP_CFG);
		t2 = net2272_ep_read(ep, EP_RSPSET);
		t = scnprintf(next, size,
			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
			"irqenb %02x\n",
			ep->ep.name, t1, t2,
			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
"HALT " : "", 1249 net2272_ep_read(ep, EP_IRQENB)); 1250 size -= t; 1251 next += t; 1252 1253 t = scnprintf(next, size, 1254 "\tstat0 %02x stat1 %02x avail %04x " 1255 "(ep%d%s-%s)%s\n", 1256 net2272_ep_read(ep, EP_STAT0), 1257 net2272_ep_read(ep, EP_STAT1), 1258 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), 1259 t1 & 0x0f, 1260 ep->is_in ? "in" : "out", 1261 type_string(t1 >> 5), 1262 ep->stopped ? "*" : ""); 1263 size -= t; 1264 next += t; 1265 1266 t = scnprintf(next, size, 1267 "\tep_transfer %06x\n", 1268 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | 1269 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | 1270 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); 1271 size -= t; 1272 next += t; 1273 1274 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; 1275 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; 1276 t = scnprintf(next, size, 1277 "\tbuf-a %s buf-b %s\n", 1278 buf_state_string(t1), 1279 buf_state_string(t2)); 1280 size -= t; 1281 next += t; 1282 } 1283 1284 spin_unlock_irqrestore(&dev->lock, flags); 1285 1286 return PAGE_SIZE - size; 1287 } 1288 static DEVICE_ATTR_RO(registers); 1289 1290 /*---------------------------------------------------------------------------*/ 1291 1292 static void 1293 net2272_set_fifo_mode(struct net2272 *dev, int mode) 1294 { 1295 u8 tmp; 1296 1297 tmp = net2272_read(dev, LOCCTL) & 0x3f; 1298 tmp |= (mode << 6); 1299 net2272_write(dev, LOCCTL, tmp); 1300 1301 INIT_LIST_HEAD(&dev->gadget.ep_list); 1302 1303 /* always ep-a, ep-c ... maybe not ep-b */ 1304 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1305 1306 switch (mode) { 1307 case 0: 1308 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1309 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; 1310 break; 1311 case 1: 1312 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1313 dev->ep[1].fifo_size = 1024; 1314 dev->ep[2].fifo_size = 512; 1315 break; 1316 case 2: 1317 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1318 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1319 break; 1320 case 3: 1321 dev->ep[1].fifo_size = 1024; 1322 break; 1323 } 1324 1325 /* ep-c is always 2 512 byte buffers */ 1326 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1327 dev->ep[3].fifo_size = 512; 1328 } 1329 1330 /*---------------------------------------------------------------------------*/ 1331 1332 static void 1333 net2272_usb_reset(struct net2272 *dev) 1334 { 1335 dev->gadget.speed = USB_SPEED_UNKNOWN; 1336 1337 net2272_cancel_dma(dev); 1338 1339 net2272_write(dev, IRQENB0, 0); 1340 net2272_write(dev, IRQENB1, 0); 1341 1342 /* clear irq state */ 1343 net2272_write(dev, IRQSTAT0, 0xff); 1344 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT)); 1345 1346 net2272_write(dev, DMAREQ, 1347 (0 << DMA_BUFFER_VALID) | 1348 (0 << DMA_REQUEST_ENABLE) | 1349 (1 << DMA_CONTROL_DACK) | 1350 (dev->dma_eot_polarity << EOT_POLARITY) | 1351 (dev->dma_dack_polarity << DACK_POLARITY) | 1352 (dev->dma_dreq_polarity << DREQ_POLARITY) | 1353 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT)); 1354 1355 net2272_cancel_dma(dev); 1356 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0); 1357 1358 /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping 1359 * note that the higher level gadget drivers are expected to convert data to little endian. 
	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
	 */
	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
}

static void
net2272_usb_reinit(struct net2272 *dev)
{
	int i;

	/* basic endpoint init */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;
		ep->not_empty = 0;

		if (use_dma && ep->num == dma_ep)
			ep->dma = 1;

		if (i > 0 && i <= 3)
			ep->fifo_size = 512;
		else
			ep->fifo_size = 64;
		net2272_ep_reset(ep);

		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}

static void
net2272_ep0_start(struct net2272 *dev)
{
	struct net2272_ep *ep0 = &dev->ep[0];

	net2272_ep_write(ep0, EP_RSPSET,
		(1 << NAK_OUT_PACKETS_MODE) |
		(1 << ALT_NAK_OUT_PACKETS));
	net2272_ep_write(ep0, EP_RSPCLR,
		(1 << HIDE_STATUS_PHASE) |
		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
	net2272_write(dev, USBCTL0,
		(dev->softconnect << USB_DETECT_ENABLE) |
		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
		(1 << IO_WAKEUP_ENABLE));
	net2272_write(dev, IRQENB0,
		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
		(1 << DMA_DONE_INTERRUPT_ENABLE));
	net2272_write(dev, IRQENB1,
		(1 << VBUS_INTERRUPT_ENABLE) |
		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
}

/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int net2272_start(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver)
{
	struct net2272 *dev;
	unsigned i;

	if (!driver || !driver->setup ||
	    driver->max_speed != USB_SPEED_HIGH)
		return -EINVAL;

	dev = container_of(_gadget, struct net2272, gadget);

	for (i = 0; i < 4; ++i)
		dev->ep[i].irqs = 0;
	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2272_ep0_start(dev);

	return 0;
}
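
/*
 * net2272_start() above is called by the UDC core through the
 * .udc_start gadget op once a function driver binds to this
 * controller: it only records the driver and arms ep0, while
 * connect/disconnect of the D+ pullup is driven separately through
 * net2272_pullup().
 */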

static void
stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	net2272_usb_reset(dev);
	for (i = 0; i < 4; ++i)
		net2272_dequeue_all(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (dev->async_callbacks && driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	net2272_usb_reinit(dev);
}

static int net2272_stop(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->driver = NULL;

	return 0;
}

static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
{
	struct net2272 *dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irq(&dev->lock);
	dev->async_callbacks = enable;
	spin_unlock_irq(&dev->lock);
}

/*---------------------------------------------------------------------------*/
/* handle ep-a/ep-b dma completions */
static void
net2272_handle_dma(struct net2272_ep *ep)
{
	struct net2272_request *req;
	unsigned len;
	int status;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct net2272_request, queue);
	else
		req = NULL;

	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);

	/* Ensure DREQ is de-asserted */
	net2272_write(ep->dev, DMAREQ,
		  (0 << DMA_BUFFER_VALID)
		| (0 << DMA_REQUEST_ENABLE)
		| (1 << DMA_CONTROL_DACK)
		| (ep->dev->dma_eot_polarity << EOT_POLARITY)
		| (ep->dev->dma_dack_polarity << DACK_POLARITY)
		| (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
		| (ep->dma << DMA_ENDPOINT_SELECT));

	ep->dev->dma_busy = 0;

	net2272_ep_write(ep, EP_IRQENB,
		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB));

	/* device-to-host transfer completed */
	if (ep->is_in) {
		/* validate a short packet or zlp if necessary */
		if ((req->req.length % ep->ep.maxpacket != 0) ||
				req->req.zero)
			set_fifo_bytecount(ep, 0);

		net2272_done(ep, req, 0);
		if (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct net2272_request, queue);
			status = net2272_kick_dma(ep, req);
			if (status < 0)
				net2272_pio_advance(ep);
		}

	/* host-to-device transfer completed */
	} else {
		/* terminated with a short packet? */
		if (net2272_read(ep->dev, IRQSTAT0) &
				(1 << DMA_DONE_INTERRUPT)) {
			/* abort system dma */
			net2272_cancel_dma(ep->dev);
		}

		/* EP_TRANSFER will contain the number of bytes
		 * actually received.
		 * NOTE: There is no overflow detection on EP_TRANSFER:
		 * We can't deal with transfers larger than 2^24 bytes!
		 */
		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
			| (net2272_ep_read(ep, EP_TRANSFER0));

		if (ep->not_empty)
			len += 4;

		req->req.actual += len;

		/* get any remaining data */
		net2272_pio_advance(ep);
	}
}

/*---------------------------------------------------------------------------*/

static void
net2272_handle_ep(struct net2272_ep *ep)
{
	struct net2272_request *req;
	u8 stat0, stat1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2272_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	stat0 = net2272_ep_read(ep, EP_STAT0);
	stat1 = net2272_ep_read(ep, EP_STAT1);
	ep->irqs++;

	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
		ep->ep.name, stat0, stat1, req ? &req->req : NULL);

	net2272_ep_write(ep, EP_STAT0, stat0 &
		~((1 << NAK_OUT_PACKETS)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
	net2272_ep_write(ep, EP_STAT1, stat1);

	/* data packet(s) received (in the fifo, OUT)
	 * direction must be validated, otherwise control read status phase
	 * could be interpreted as a valid packet
	 */
	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
		net2272_pio_advance(ep);
	/* data packet(s) transmitted (IN) */
	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
		net2272_pio_advance(ep);
}

static struct net2272_ep *
net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
{
	struct net2272_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return &dev->ep[0];

	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 bEndpointAddress;

		if (!ep->desc)
			continue;
		bEndpointAddress = ep->desc->bEndpointAddress;
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
			return ep;
	}
	return NULL;
}

/*
 * USB Test Packet:
 * JKJKJKJK * 9
 * JJKKJJKK * 8
 * JJJJKKKK * 8
 * JJJJJJJKKKKKKK * 8
 * JJJJJJJK * 8
 * {JKKKKKKK * 10}, JK
 */
static const u8 net2272_test_packet[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
};
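
/*
 * The array above is the standard USB 2.0 high-speed test pattern;
 * net2272_set_test_mode() below loads it into the FIFO for TEST_PACKET
 * and selects the requested test mode via the USBTEST register, in
 * response to a SET_FEATURE(TEST_MODE) request from the host.
 */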

static void
net2272_set_test_mode(struct net2272 *dev, int mode)
{
	int i;

	/* Disable all net2272 interrupts:
	 * Nothing but a power cycle should stop the test.
	 */
	net2272_write(dev, IRQENB0, 0x00);
	net2272_write(dev, IRQENB1, 0x00);

	/* Force transceiver to high-speed */
	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);

	net2272_write(dev, PAGESEL, 0);
	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
	net2272_write(dev, EP_RSPCLR,
			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
			| (1 << HIDE_STATUS_PHASE));
	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);

	/* wait for status phase to complete */
	while (!(net2272_read(dev, EP_STAT0) &
				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
		;

	/* Enable test mode */
	net2272_write(dev, USBTEST, mode);

	/* load test packet */
	if (mode == USB_TEST_PACKET) {
		/* switch to 8 bit mode */
		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
				~(1 << DATA_WIDTH));

		for (i = 0; i < sizeof(net2272_test_packet); ++i)
			net2272_write(dev, EP_DATA, net2272_test_packet[i]);

		/* Validate test packet */
		net2272_write(dev, EP_TRANSFER0, 0);
	}
}

static void
net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
{
	struct net2272_ep *ep;
	u8 num, scratch;

	/* starting a control request? */
	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u8 raw[8];
			struct usb_ctrlrequest r;
		} u;
		int tmp = 0;
		struct net2272_request *req;

		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			dev_dbg(dev->dev, "%s\n",
				usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover interrupt state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			net2272_done(ep, req,
				(req->req.actual == req->req.length) ? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		net2272_ep_write(ep, EP_STAT0,
			  (1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
		net2272_ep_write(ep, EP_STAT1,
			  (1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP));

		/*
		 * Ensure Control Read pre-validation setting is beyond maximum size
		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
		 *    an EP0 transfer following the Control Write is a Control Read,
		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
		 *    pre-validation count.
		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
		 *    the pre-validation count cannot cause an unexpected validation
		 */
		net2272_write(dev, PAGESEL, 0);
		net2272_write(dev, EP_TRANSFER2, 0xff);
		net2272_write(dev, EP_TRANSFER1, 0xff);
		net2272_write(dev, EP_TRANSFER0, 0xff);

		u.raw[0] = net2272_read(dev, SETUP0);
		u.raw[1] = net2272_read(dev, SETUP1);
		u.raw[2] = net2272_read(dev, SETUP2);
		u.raw[3] = net2272_read(dev, SETUP3);
		u.raw[4] = net2272_read(dev, SETUP4);
		u.raw[5] = net2272_read(dev, SETUP5);
		u.raw[6] = net2272_read(dev, SETUP6);
		u.raw[7] = net2272_read(dev, SETUP7);
		/*
		 * If you have a big endian cpu make sure le16_to_cpus
		 * performs the proper byte swapping here...
		 */
		le16_to_cpus(&u.r.wValue);
		le16_to_cpus(&u.r.wIndex);
		le16_to_cpus(&u.r.wLength);

		/* ack the irq */
		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status phase happen.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
			stop_out_naking(ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
		net2272_ep_write(ep, EP_IRQENB, scratch);

		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			goto delegate;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2272_ep *e;
			u16 status = 0;

			switch (u.r.bRequestType & USB_RECIP_MASK) {
			case USB_RECIP_ENDPOINT:
				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
				if (!e || u.r.wLength > 2)
					goto do_stall;
				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
					status = cpu_to_le16(1);
				else
					status = cpu_to_le16(0);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "%s stat %02x\n",
					ep->ep.name, status);
				goto next_endpoints;
			case USB_RECIP_DEVICE:
				if (u.r.wLength > 2)
					goto do_stall;
				if (dev->gadget.is_selfpowered)
					status = (1 << USB_DEVICE_SELF_POWERED);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "device stat %02x\n", status);
				goto next_endpoints;
			case USB_RECIP_INTERFACE:
				if (u.r.wLength > 2)
					goto do_stall;

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "interface status %02x\n", status);
				goto next_endpoints;
			}

			break;
		}
		case USB_REQ_CLEAR_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			if (e->wedged) {
				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
					ep->ep.name);
			} else {
				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
				clear_halt(e);
			}
			allow_status(ep);
			goto next_endpoints;
		}
		case USB_REQ_SET_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType == USB_RECIP_DEVICE) {
				if (u.r.wIndex != NORMAL_OPERATION)
					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
				allow_status(ep);
				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
				goto next_endpoints;
			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			set_halt(e);
			allow_status(ep);
			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
		}
		case USB_REQ_SET_ADDRESS: {
			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
			allow_status(ep);
			break;
		}
		default:
 delegate:
			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				u.r.wValue, u.r.wIndex,
				net2272_ep_read(ep, EP_CFG));
			if (dev->async_callbacks) {
				spin_unlock(&dev->lock);
				tmp = dev->driver->setup(&dev->gadget, &u.r);
				spin_lock(&dev->lock);
			}
		}

		/* stall ep0 on error */
		if (tmp < 0) {
 do_stall:
			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
				u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}
	/* endpoint dma irq? */
	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
		net2272_cancel_dma(dev);
		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
		stat &= ~(1 << DMA_DONE_INTERRUPT);
		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
			? 2 : 1;

		ep = &dev->ep[num];
		net2272_handle_dma(ep);
	}

 next_endpoints:
	/* endpoint data irq? */
	scratch = stat & 0x0f;
	stat &= ~0x0f;
	for (num = 0; scratch; num++) {
		u8 t;

		/* does this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep[num];
		net2272_handle_ep(ep);
	}

	/* some interrupts we can just ignore */
	stat &= ~(1 << SOF_INTERRUPT);

	if (stat)
		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
}

static void
net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
{
	u8 tmp, mask;

	/* after disconnect there's nothing else to do! */
*/ 1981 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); 1982 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED); 1983 1984 if (stat & tmp) { 1985 bool reset = false; 1986 bool disconnect = false; 1987 1988 /* 1989 * Ignore disconnects and resets if the speed hasn't been set. 1990 * VBUS can bounce and there's always an initial reset. 1991 */ 1992 net2272_write(dev, IRQSTAT1, tmp); 1993 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 1994 if ((stat & (1 << VBUS_INTERRUPT)) && 1995 (net2272_read(dev, USBCTL1) & 1996 (1 << VBUS_PIN)) == 0) { 1997 disconnect = true; 1998 dev_dbg(dev->dev, "disconnect %s\n", 1999 dev->driver->driver.name); 2000 } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) && 2001 (net2272_read(dev, USBCTL1) & mask) 2002 == 0) { 2003 reset = true; 2004 dev_dbg(dev->dev, "reset %s\n", 2005 dev->driver->driver.name); 2006 } 2007 2008 if (disconnect || reset) { 2009 stop_activity(dev, dev->driver); 2010 net2272_ep0_start(dev); 2011 if (dev->async_callbacks) { 2012 spin_unlock(&dev->lock); 2013 if (reset) 2014 usb_gadget_udc_reset(&dev->gadget, dev->driver); 2015 else 2016 (dev->driver->disconnect)(&dev->gadget); 2017 spin_lock(&dev->lock); 2018 } 2019 return; 2020 } 2021 } 2022 stat &= ~tmp; 2023 2024 if (!stat) 2025 return; 2026 } 2027 2028 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT); 2029 if (stat & tmp) { 2030 net2272_write(dev, IRQSTAT1, tmp); 2031 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) { 2032 if (dev->async_callbacks && dev->driver->suspend) 2033 dev->driver->suspend(&dev->gadget); 2034 if (!enable_suspend) { 2035 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT); 2036 dev_dbg(dev->dev, "Suspend disabled, ignoring\n"); 2037 } 2038 } else { 2039 if (dev->async_callbacks && dev->driver->resume) 2040 dev->driver->resume(&dev->gadget); 2041 } 2042 stat &= ~tmp; 2043 } 2044 2045 /* clear any other status/irqs */ 2046 if (stat) 2047 net2272_write(dev, IRQSTAT1, stat); 2048 2049 /* some status we can just ignore */ 2050 stat &= ~((1 << CONTROL_STATUS_INTERRUPT) 2051 | (1 << SUSPEND_REQUEST_INTERRUPT) 2052 | (1 << RESUME_INTERRUPT)); 2053 if (!stat) 2054 return; 2055 else 2056 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat); 2057 } 2058 2059 static irqreturn_t net2272_irq(int irq, void *_dev) 2060 { 2061 struct net2272 *dev = _dev; 2062 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2) 2063 u32 intcsr; 2064 #endif 2065 #if defined(PLX_PCI_RDK) 2066 u8 dmareq; 2067 #endif 2068 spin_lock(&dev->lock); 2069 #if defined(PLX_PCI_RDK) 2070 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); 2071 2072 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) { 2073 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE), 2074 dev->rdk1.plx9054_base_addr + INTCSR); 2075 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1)); 2076 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0)); 2077 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); 2078 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE), 2079 dev->rdk1.plx9054_base_addr + INTCSR); 2080 } 2081 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) { 2082 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2083 dev->rdk1.plx9054_base_addr + DMACSR0); 2084 2085 dmareq = net2272_read(dev, DMAREQ); 2086 if (dmareq & 0x01) 2087 net2272_handle_dma(&dev->ep[2]); 2088 else 2089 net2272_handle_dma(&dev->ep[1]); 2090 } 2091 #endif 2092 #if defined(PLX_PCI_RDK2) 2093 /* see if PCI int for us by checking irqstat */ 2094 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2095 if (!(intcsr & (1 << 
NET2272_PCI_IRQ))) {
2096 spin_unlock(&dev->lock);
2097 return IRQ_NONE;
2098 }
2099 /* check dma interrupts */
2100 #endif
2101 /* Platform/device interrupt handler */
2102 #if !defined(PLX_PCI_RDK)
2103 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2104 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2105 #endif
2106 spin_unlock(&dev->lock);
2107
2108 return IRQ_HANDLED;
2109 }
2110
2111 static int net2272_present(struct net2272 *dev)
2112 {
2113 /*
2114 * Quick test to see if the CPU can communicate properly with the NET2272.
2115 * Verifies the connection using writes and reads to write/read and
2116 * read-only registers.
2117 *
2118 * This routine is strongly recommended, especially during early bring-up
2119 * of new hardware; however, for designs that do not apply Power On System
2120 * Tests (POST) it may be discarded (or perhaps minimized).
2121 */
2122 unsigned int ii;
2123 u8 val, refval;
2124
2125 /* Verify the NET2272 SCRATCH register can be written and read back */
2126 refval = net2272_read(dev, SCRATCH);
2127 for (ii = 0; ii < 0x100; ii += 7) {
2128 net2272_write(dev, SCRATCH, ii);
2129 val = net2272_read(dev, SCRATCH);
2130 if (val != ii) {
2131 dev_dbg(dev->dev,
2132 "%s: write/read SCRATCH register test failed: "
2133 "wrote:0x%2.2x, read:0x%2.2x\n",
2134 __func__, ii, val);
2135 return -EINVAL;
2136 }
2137 }
2138 /* To be nice, we write the original SCRATCH value back: */
2139 net2272_write(dev, SCRATCH, refval);
2140
2141 /* Verify NET2272 CHIPREV register is read-only: */
2142 refval = net2272_read(dev, CHIPREV_2272);
2143 for (ii = 0; ii < 0x100; ii += 7) {
2144 net2272_write(dev, CHIPREV_2272, ii);
2145 val = net2272_read(dev, CHIPREV_2272);
2146 if (val != refval) {
2147 dev_dbg(dev->dev,
2148 "%s: write/read CHIPREV register test failed: "
2149 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2150 __func__, ii, val, refval);
2151 return -EINVAL;
2152 }
2153 }
2154
2155 /*
2156 * Verify NET2272's "NET2270 legacy revision" register
2157 * - NET2272 has two revision registers. The NET2270 legacy revision
2158 * register should read the same value, regardless of the NET2272
2159 * silicon revision. The legacy register applies to NET2270
2160 * firmware being applied to the NET2272.
2161 */
2162 val = net2272_read(dev, CHIPREV_LEGACY);
2163 if (val != NET2270_LEGACY_REV) {
2164 /*
2165 * Unexpected legacy revision value
2166 * - Perhaps the chip is a NET2270?
2167 */
2168 dev_dbg(dev->dev,
2169 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2170 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x.
(Not NET2272?)\n", 2171 __func__, NET2270_LEGACY_REV, val); 2172 return -EINVAL; 2173 } 2174 2175 /* 2176 * Verify NET2272 silicon revision 2177 * - This revision register is appropriate for the silicon version 2178 * of the NET2272 2179 */ 2180 val = net2272_read(dev, CHIPREV_2272); 2181 switch (val) { 2182 case CHIPREV_NET2272_R1: 2183 /* 2184 * NET2272 Rev 1 has DMA related errata: 2185 * - Newer silicon (Rev 1A or better) required 2186 */ 2187 dev_dbg(dev->dev, 2188 "%s: Rev 1 detected: newer silicon recommended for DMA support\n", 2189 __func__); 2190 break; 2191 case CHIPREV_NET2272_R1A: 2192 break; 2193 default: 2194 /* NET2272 silicon version *may* not work with this firmware */ 2195 dev_dbg(dev->dev, 2196 "%s: unexpected silicon revision register value: " 2197 " CHIPREV_2272: 0x%2.2x\n", 2198 __func__, val); 2199 /* 2200 * Return Success, even though the chip rev is not an expected value 2201 * - Older, pre-built firmware can attempt to operate on newer silicon 2202 * - Often, new silicon is perfectly compatible 2203 */ 2204 } 2205 2206 /* Success: NET2272 checks out OK */ 2207 return 0; 2208 } 2209 2210 static void 2211 net2272_gadget_release(struct device *_dev) 2212 { 2213 struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev); 2214 2215 kfree(dev); 2216 } 2217 2218 /*---------------------------------------------------------------------------*/ 2219 2220 static void 2221 net2272_remove(struct net2272 *dev) 2222 { 2223 if (dev->added) 2224 usb_del_gadget(&dev->gadget); 2225 free_irq(dev->irq, dev); 2226 iounmap(dev->base_addr); 2227 device_remove_file(dev->dev, &dev_attr_registers); 2228 2229 dev_info(dev->dev, "unbind\n"); 2230 } 2231 2232 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq) 2233 { 2234 struct net2272 *ret; 2235 2236 if (!irq) { 2237 dev_dbg(dev, "No IRQ!\n"); 2238 return ERR_PTR(-ENODEV); 2239 } 2240 2241 /* alloc, and start init */ 2242 ret = kzalloc(sizeof(*ret), GFP_KERNEL); 2243 if (!ret) 2244 return ERR_PTR(-ENOMEM); 2245 2246 spin_lock_init(&ret->lock); 2247 ret->irq = irq; 2248 ret->dev = dev; 2249 ret->gadget.ops = &net2272_ops; 2250 ret->gadget.max_speed = USB_SPEED_HIGH; 2251 2252 /* the "gadget" abstracts/virtualizes the controller */ 2253 ret->gadget.name = driver_name; 2254 usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release); 2255 2256 return ret; 2257 } 2258 2259 static int 2260 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) 2261 { 2262 int ret; 2263 2264 /* See if there... 
*/ 2265 if (net2272_present(dev)) { 2266 dev_warn(dev->dev, "2272 not found!\n"); 2267 ret = -ENODEV; 2268 goto err; 2269 } 2270 2271 net2272_usb_reset(dev); 2272 net2272_usb_reinit(dev); 2273 2274 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev); 2275 if (ret) { 2276 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq); 2277 goto err; 2278 } 2279 2280 dev->chiprev = net2272_read(dev, CHIPREV_2272); 2281 2282 /* done */ 2283 dev_info(dev->dev, "%s\n", driver_desc); 2284 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n", 2285 dev->irq, dev->base_addr, dev->chiprev, 2286 dma_mode_string()); 2287 dev_info(dev->dev, "version: %s\n", driver_vers); 2288 2289 ret = device_create_file(dev->dev, &dev_attr_registers); 2290 if (ret) 2291 goto err_irq; 2292 2293 ret = usb_add_gadget(&dev->gadget); 2294 if (ret) 2295 goto err_add_udc; 2296 dev->added = 1; 2297 2298 return 0; 2299 2300 err_add_udc: 2301 device_remove_file(dev->dev, &dev_attr_registers); 2302 err_irq: 2303 free_irq(dev->irq, dev); 2304 err: 2305 return ret; 2306 } 2307 2308 #ifdef CONFIG_USB_PCI 2309 2310 /* 2311 * wrap this driver around the specified device, but 2312 * don't respond over USB until a gadget driver binds to us 2313 */ 2314 2315 static int 2316 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) 2317 { 2318 unsigned long resource, len, tmp; 2319 void __iomem *mem_mapped_addr[4]; 2320 int ret, i; 2321 2322 /* 2323 * BAR 0 holds PLX 9054 config registers 2324 * BAR 1 is i/o memory; unused here 2325 * BAR 2 holds EPLD config registers 2326 * BAR 3 holds NET2272 registers 2327 */ 2328 2329 /* Find and map all address spaces */ 2330 for (i = 0; i < 4; ++i) { 2331 if (i == 1) 2332 continue; /* BAR1 unused */ 2333 2334 resource = pci_resource_start(pdev, i); 2335 len = pci_resource_len(pdev, i); 2336 2337 if (!request_mem_region(resource, len, driver_name)) { 2338 dev_dbg(dev->dev, "controller already in use\n"); 2339 ret = -EBUSY; 2340 goto err; 2341 } 2342 2343 mem_mapped_addr[i] = ioremap(resource, len); 2344 if (mem_mapped_addr[i] == NULL) { 2345 release_mem_region(resource, len); 2346 dev_dbg(dev->dev, "can't map memory\n"); 2347 ret = -EFAULT; 2348 goto err; 2349 } 2350 } 2351 2352 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0]; 2353 dev->rdk1.epld_base_addr = mem_mapped_addr[2]; 2354 dev->base_addr = mem_mapped_addr[3]; 2355 2356 /* Set PLX 9054 bus width (16 bits) */ 2357 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1); 2358 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT, 2359 dev->rdk1.plx9054_base_addr + LBRD1); 2360 2361 /* Enable PLX 9054 Interrupts */ 2362 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) | 2363 (1 << PCI_INTERRUPT_ENABLE) | 2364 (1 << LOCAL_INTERRUPT_INPUT_ENABLE), 2365 dev->rdk1.plx9054_base_addr + INTCSR); 2366 2367 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2368 dev->rdk1.plx9054_base_addr + DMACSR0); 2369 2370 /* reset */ 2371 writeb((1 << EPLD_DMA_ENABLE) | 2372 (1 << DMA_CTL_DACK) | 2373 (1 << DMA_TIMEOUT_ENABLE) | 2374 (1 << USER) | 2375 (0 << MPX_MODE) | 2376 (1 << BUSWIDTH) | 2377 (1 << NET2272_RESET), 2378 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2379 2380 mb(); 2381 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) & 2382 ~(1 << NET2272_RESET), 2383 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2384 udelay(200); 2385 2386 return 0; 2387 2388 err: 2389 while (--i >= 0) { 2390 if (i == 1) 2391 continue; /* BAR1 unused */ 2392 iounmap(mem_mapped_addr[i]); 2393 
release_mem_region(pci_resource_start(pdev, i),
2394 pci_resource_len(pdev, i));
2395 }
2396
2397 return ret;
2398 }
2399
2400 static int
2401 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2402 {
2403 unsigned long resource, len;
2404 void __iomem *mem_mapped_addr[2];
2405 int ret, i;
2406
2407 /*
2408 * BAR 0 holds FPGA config registers
2409 * BAR 1 holds NET2272 registers
2410 */
2411
2412 /* Find and map all address spaces; BARs 2-3 unused in RDK 2 */
2413 for (i = 0; i < 2; ++i) {
2414 resource = pci_resource_start(pdev, i);
2415 len = pci_resource_len(pdev, i);
2416
2417 if (!request_mem_region(resource, len, driver_name)) {
2418 dev_dbg(dev->dev, "controller already in use\n");
2419 ret = -EBUSY;
2420 goto err;
2421 }
2422
2423 mem_mapped_addr[i] = ioremap(resource, len);
2424 if (mem_mapped_addr[i] == NULL) {
2425 release_mem_region(resource, len);
2426 dev_dbg(dev->dev, "can't map memory\n");
2427 ret = -EFAULT;
2428 goto err;
2429 }
2430 }
2431
2432 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2433 dev->base_addr = mem_mapped_addr[1];
2434
2435 mb();
2436 /* Set 2272 bus width (16 bits) and reset */
2437 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2438 udelay(200);
2439 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2440 /* Print FPGA version number */
2441 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2442 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2443 /* Enable FPGA Interrupts */
2444 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2445
2446 return 0;
2447
2448 err:
2449 while (--i >= 0) {
2450 iounmap(mem_mapped_addr[i]);
2451 release_mem_region(pci_resource_start(pdev, i),
2452 pci_resource_len(pdev, i));
2453 }
2454
2455 return ret;
2456 }
2457
2458 static int
2459 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2460 {
2461 struct net2272 *dev;
2462 int ret;
2463
2464 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2465 if (IS_ERR(dev))
2466 return PTR_ERR(dev);
2467 dev->dev_id = pdev->device;
2468
2469 if (pci_enable_device(pdev) < 0) {
2470 ret = -ENODEV;
2471 goto err_put;
2472 }
2473
2474 pci_set_master(pdev);
2475
2476 switch (pdev->device) {
2477 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2478 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2479 default: BUG();
2480 }
2481 if (ret)
2482 goto err_pci;
2483
2484 ret = net2272_probe_fin(dev, 0);
2485 if (ret)
2486 goto err_pci;
2487
2488 pci_set_drvdata(pdev, dev);
2489
2490 return 0;
2491
2492 err_pci:
2493 pci_disable_device(pdev);
2494 err_put:
2495 usb_put_gadget(&dev->gadget);
2496
2497 return ret;
2498 }
2499
2500 static void
2501 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2502 {
2503 int i;
2504
2505 /* disable PLX 9054 interrupts */
2506 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2507 ~(1 << PCI_INTERRUPT_ENABLE),
2508 dev->rdk1.plx9054_base_addr + INTCSR);
2509
2510 /* clean up resources allocated during probe() */
2511 iounmap(dev->rdk1.plx9054_base_addr);
2512 iounmap(dev->rdk1.epld_base_addr);
2513
2514 for (i = 0; i < 4; ++i) {
2515 if (i == 1)
2516 continue; /* BAR1 unused */
2517 release_mem_region(pci_resource_start(pdev, i),
2518 pci_resource_len(pdev, i));
2519 }
2520 }
2521
2522 static void
2523 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2524 {
2525 int i;
2526
2527 /* disable fpga interrupts
2528 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2529 ~(1 << PCI_INTERRUPT_ENABLE),
2530
dev->rdk1.plx9054_base_addr + INTCSR); 2531 */ 2532 2533 /* clean up resources allocated during probe() */ 2534 iounmap(dev->rdk2.fpga_base_addr); 2535 2536 for (i = 0; i < 2; ++i) 2537 release_mem_region(pci_resource_start(pdev, i), 2538 pci_resource_len(pdev, i)); 2539 } 2540 2541 static void 2542 net2272_pci_remove(struct pci_dev *pdev) 2543 { 2544 struct net2272 *dev = pci_get_drvdata(pdev); 2545 2546 net2272_remove(dev); 2547 2548 switch (pdev->device) { 2549 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break; 2550 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break; 2551 default: BUG(); 2552 } 2553 2554 pci_disable_device(pdev); 2555 2556 usb_put_gadget(&dev->gadget); 2557 } 2558 2559 /* Table of matching PCI IDs */ 2560 static struct pci_device_id pci_ids[] = { 2561 { /* RDK 1 card */ 2562 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), 2563 .class_mask = 0, 2564 .vendor = PCI_VENDOR_ID_PLX, 2565 .device = PCI_DEVICE_ID_RDK1, 2566 .subvendor = PCI_ANY_ID, 2567 .subdevice = PCI_ANY_ID, 2568 }, 2569 { /* RDK 2 card */ 2570 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), 2571 .class_mask = 0, 2572 .vendor = PCI_VENDOR_ID_PLX, 2573 .device = PCI_DEVICE_ID_RDK2, 2574 .subvendor = PCI_ANY_ID, 2575 .subdevice = PCI_ANY_ID, 2576 }, 2577 { } 2578 }; 2579 MODULE_DEVICE_TABLE(pci, pci_ids); 2580 2581 static struct pci_driver net2272_pci_driver = { 2582 .name = driver_name, 2583 .id_table = pci_ids, 2584 2585 .probe = net2272_pci_probe, 2586 .remove = net2272_pci_remove, 2587 }; 2588 2589 static int net2272_pci_register(void) 2590 { 2591 return pci_register_driver(&net2272_pci_driver); 2592 } 2593 2594 static void net2272_pci_unregister(void) 2595 { 2596 pci_unregister_driver(&net2272_pci_driver); 2597 } 2598 2599 #else 2600 static inline int net2272_pci_register(void) { return 0; } 2601 static inline void net2272_pci_unregister(void) { } 2602 #endif 2603 2604 /*---------------------------------------------------------------------------*/ 2605 2606 static int 2607 net2272_plat_probe(struct platform_device *pdev) 2608 { 2609 struct net2272 *dev; 2610 int ret; 2611 unsigned int irqflags; 2612 resource_size_t base, len; 2613 struct resource *iomem, *iomem_bus, *irq_res; 2614 2615 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2616 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2617 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0); 2618 if (!irq_res || !iomem) { 2619 dev_err(&pdev->dev, "must provide irq/base addr"); 2620 return -EINVAL; 2621 } 2622 2623 dev = net2272_probe_init(&pdev->dev, irq_res->start); 2624 if (IS_ERR(dev)) 2625 return PTR_ERR(dev); 2626 2627 irqflags = 0; 2628 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) 2629 irqflags |= IRQF_TRIGGER_RISING; 2630 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) 2631 irqflags |= IRQF_TRIGGER_FALLING; 2632 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) 2633 irqflags |= IRQF_TRIGGER_HIGH; 2634 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) 2635 irqflags |= IRQF_TRIGGER_LOW; 2636 2637 base = iomem->start; 2638 len = resource_size(iomem); 2639 if (iomem_bus) 2640 dev->base_shift = iomem_bus->start; 2641 2642 if (!request_mem_region(base, len, driver_name)) { 2643 dev_dbg(dev->dev, "get request memory region!\n"); 2644 ret = -EBUSY; 2645 goto err; 2646 } 2647 dev->base_addr = ioremap(base, len); 2648 if (!dev->base_addr) { 2649 dev_dbg(dev->dev, "can't map memory\n"); 2650 ret = -EFAULT; 2651 goto err_req; 2652 } 2653 2654 ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW); 2655 if (ret) 2656 goto 
err_io; 2657 2658 platform_set_drvdata(pdev, dev); 2659 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n", 2660 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no "); 2661 2662 return 0; 2663 2664 err_io: 2665 iounmap(dev->base_addr); 2666 err_req: 2667 release_mem_region(base, len); 2668 err: 2669 usb_put_gadget(&dev->gadget); 2670 2671 return ret; 2672 } 2673 2674 static int 2675 net2272_plat_remove(struct platform_device *pdev) 2676 { 2677 struct net2272 *dev = platform_get_drvdata(pdev); 2678 2679 net2272_remove(dev); 2680 2681 release_mem_region(pdev->resource[0].start, 2682 resource_size(&pdev->resource[0])); 2683 2684 usb_put_gadget(&dev->gadget); 2685 2686 return 0; 2687 } 2688 2689 static struct platform_driver net2272_plat_driver = { 2690 .probe = net2272_plat_probe, 2691 .remove = net2272_plat_remove, 2692 .driver = { 2693 .name = driver_name, 2694 }, 2695 /* FIXME .suspend, .resume */ 2696 }; 2697 MODULE_ALIAS("platform:net2272"); 2698 2699 static int __init net2272_init(void) 2700 { 2701 int ret; 2702 2703 ret = net2272_pci_register(); 2704 if (ret) 2705 return ret; 2706 ret = platform_driver_register(&net2272_plat_driver); 2707 if (ret) 2708 goto err_pci; 2709 return ret; 2710 2711 err_pci: 2712 net2272_pci_unregister(); 2713 return ret; 2714 } 2715 module_init(net2272_init); 2716 2717 static void __exit net2272_cleanup(void) 2718 { 2719 net2272_pci_unregister(); 2720 platform_driver_unregister(&net2272_plat_driver); 2721 } 2722 module_exit(net2272_cleanup); 2723 2724 MODULE_DESCRIPTION(DRIVER_DESC); 2725 MODULE_AUTHOR("PLX Technology, Inc."); 2726 MODULE_LICENSE("GPL"); 2727
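/*---------------------------------------------------------------------------*/

/*
 * Illustrative sketch only (not compiled, hence the #if 0 guard): how the
 * eight raw bytes read from SETUP0..SETUP7 in net2272_handle_stat0_irqs()
 * line up with struct usb_ctrlrequest, and why the le16_to_cpus() calls
 * matter on big-endian CPUs.  The union and helper names below are
 * hypothetical and exist purely for illustration; they are not part of
 * this driver.
 */
#if 0
union example_setup {
	struct usb_ctrlrequest	r;
	u8			raw[8];
};

static inline u16 example_wValue(const union example_setup *u)
{
	/*
	 * SETUP data is little-endian on the wire: raw[2] is the low byte
	 * of wValue, raw[3] the high byte.  This is exactly what
	 * le16_to_cpus(&u->r.wValue) fixes up in place on a big-endian CPU
	 * (and leaves untouched on a little-endian one).
	 */
	return (u16)u->raw[2] | ((u16)u->raw[3] << 8);
}
#endif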
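/*
 * Hypothetical sketch of the gadget-driver side of the "delegate:" path in
 * net2272_handle_stat0_irqs(): class/vendor and otherwise unhandled control
 * requests are passed to dev->driver->setup(), and a negative return value
 * makes this UDC protocol-stall ep0 (the do_stall: path).  The function name
 * and the vendor request value below are invented for illustration; a real
 * gadget driver would normally queue a usb_request on ep0 to complete the
 * data and/or status stage.
 */
#if 0
static int example_gadget_setup(struct usb_gadget *gadget,
	const struct usb_ctrlrequest *ctrl)
{
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR &&
	    ctrl->bRequest == 0x42 &&
	    ctrl->wValue == 0 && ctrl->wLength == 0)
		return 0;	/* request handled, no data stage */

	return -EOPNOTSUPP;	/* anything else gets a protocol stall on ep0 */
}
#endif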
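/*
 * Hypothetical board-support sketch (not part of this driver): the platform
 * binding expects an IORESOURCE_MEM region for the chip's registers and an
 * IORESOURCE_IRQ resource whose trigger flags net2272_plat_probe() translates
 * into IRQF_* flags; an optional IORESOURCE_BUS resource sets dev->base_shift.
 * The base address and interrupt number below are invented placeholders and
 * would have to be replaced with real board values.
 */
#if 0
static struct resource board_net2272_resources[] = {
	{
		.start	= 0x20300000,		/* example chip-select base */
		.end	= 0x20300000 + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 64,			/* example interrupt line */
		.end	= 64,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
};

static struct platform_device board_net2272_device = {
	.name		= "net2272",		/* matches driver_name above */
	.id		= -1,
	.resource	= board_net2272_resources,
	.num_resources	= ARRAY_SIZE(board_net2272_resources),
};

/* Board init code would then call platform_device_register(&board_net2272_device). */
#endif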