1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Driver for PLX NET2272 USB device controller 4 * 5 * Copyright (C) 2005-2006 PLX Technology, Inc. 6 * Copyright (C) 2006-2011 Analog Devices, Inc. 7 */ 8 9 #include <linux/delay.h> 10 #include <linux/device.h> 11 #include <linux/errno.h> 12 #include <linux/gpio.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/io.h> 16 #include <linux/ioport.h> 17 #include <linux/kernel.h> 18 #include <linux/list.h> 19 #include <linux/module.h> 20 #include <linux/moduleparam.h> 21 #include <linux/pci.h> 22 #include <linux/platform_device.h> 23 #include <linux/prefetch.h> 24 #include <linux/sched.h> 25 #include <linux/slab.h> 26 #include <linux/timer.h> 27 #include <linux/usb.h> 28 #include <linux/usb/ch9.h> 29 #include <linux/usb/gadget.h> 30 31 #include <asm/byteorder.h> 32 #include <asm/unaligned.h> 33 34 #include "net2272.h" 35 36 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller" 37 38 static const char driver_name[] = "net2272"; 39 static const char driver_vers[] = "2006 October 17/mainline"; 40 static const char driver_desc[] = DRIVER_DESC; 41 42 static const char ep0name[] = "ep0"; 43 static const char * const ep_name[] = { 44 ep0name, 45 "ep-a", "ep-b", "ep-c", 46 }; 47 48 #ifdef CONFIG_USB_NET2272_DMA 49 /* 50 * use_dma: the NET2272 can use an external DMA controller. 51 * Note that since there is no generic DMA api, some functions, 52 * notably request_dma, start_dma, and cancel_dma will need to be 53 * modified for your platform's particular dma controller. 54 * 55 * If use_dma is disabled, pio will be used instead. 56 */ 57 static bool use_dma = 0; 58 module_param(use_dma, bool, 0644); 59 60 /* 61 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b) 62 * The NET2272 can only use dma for a single endpoint at a time. 63 * At some point this could be modified to allow either endpoint 64 * to take control of dma as it becomes available. 65 * 66 * Note that DMA should not be used on OUT endpoints unless it can 67 * be guaranteed that no short packets will arrive on an IN endpoint 68 * while the DMA operation is pending. Otherwise the OUT DMA will 69 * terminate prematurely (See NET2272 Errata 630-0213-0101) 70 */ 71 static ushort dma_ep = 1; 72 module_param(dma_ep, ushort, 0644); 73 74 /* 75 * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton): 76 * mode 0 == Slow DREQ mode 77 * mode 1 == Fast DREQ mode 78 * mode 2 == Burst mode 79 */ 80 static ushort dma_mode = 2; 81 module_param(dma_mode, ushort, 0644); 82 #else 83 #define use_dma 0 84 #define dma_ep 1 85 #define dma_mode 2 86 #endif 87 88 /* 89 * fifo_mode: net2272 buffer configuration: 90 * mode 0 == ep-{a,b,c} 512db each 91 * mode 1 == ep-a 1k, ep-{b,c} 512db 92 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db 93 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db 94 */ 95 static ushort fifo_mode = 0; 96 module_param(fifo_mode, ushort, 0644); 97 98 /* 99 * enable_suspend: When enabled, the driver will respond to 100 * USB suspend requests by powering down the NET2272. Otherwise, 101 * USB suspend requests will be ignored. This is acceptible for 102 * self-powered devices. For bus powered devices set this to 1. 
103 */ 104 static ushort enable_suspend = 0; 105 module_param(enable_suspend, ushort, 0644); 106 107 static void assert_out_naking(struct net2272_ep *ep, const char *where) 108 { 109 u8 tmp; 110 111 #ifndef DEBUG 112 return; 113 #endif 114 115 tmp = net2272_ep_read(ep, EP_STAT0); 116 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) { 117 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n", 118 ep->ep.name, where, tmp); 119 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 120 } 121 } 122 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__) 123 124 static void stop_out_naking(struct net2272_ep *ep) 125 { 126 u8 tmp = net2272_ep_read(ep, EP_STAT0); 127 128 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0) 129 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 130 } 131 132 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out") 133 134 static char *type_string(u8 bmAttributes) 135 { 136 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 137 case USB_ENDPOINT_XFER_BULK: return "bulk"; 138 case USB_ENDPOINT_XFER_ISOC: return "iso"; 139 case USB_ENDPOINT_XFER_INT: return "intr"; 140 default: return "control"; 141 } 142 } 143 144 static char *buf_state_string(unsigned state) 145 { 146 switch (state) { 147 case BUFF_FREE: return "free"; 148 case BUFF_VALID: return "valid"; 149 case BUFF_LCL: return "local"; 150 case BUFF_USB: return "usb"; 151 default: return "unknown"; 152 } 153 } 154 155 static char *dma_mode_string(void) 156 { 157 if (!use_dma) 158 return "PIO"; 159 switch (dma_mode) { 160 case 0: return "SLOW DREQ"; 161 case 1: return "FAST DREQ"; 162 case 2: return "BURST"; 163 default: return "invalid"; 164 } 165 } 166 167 static void net2272_dequeue_all(struct net2272_ep *); 168 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *); 169 static int net2272_fifo_status(struct usb_ep *); 170 171 static const struct usb_ep_ops net2272_ep_ops; 172 173 /*---------------------------------------------------------------------------*/ 174 175 static int 176 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 177 { 178 struct net2272 *dev; 179 struct net2272_ep *ep; 180 u32 max; 181 u8 tmp; 182 unsigned long flags; 183 184 ep = container_of(_ep, struct net2272_ep, ep); 185 if (!_ep || !desc || ep->desc || _ep->name == ep0name 186 || desc->bDescriptorType != USB_DT_ENDPOINT) 187 return -EINVAL; 188 dev = ep->dev; 189 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 190 return -ESHUTDOWN; 191 192 max = usb_endpoint_maxp(desc); 193 194 spin_lock_irqsave(&dev->lock, flags); 195 _ep->maxpacket = max; 196 ep->desc = desc; 197 198 /* net2272_ep_reset() has already been called */ 199 ep->stopped = 0; 200 ep->wedged = 0; 201 202 /* set speed-dependent max packet */ 203 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff); 204 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8); 205 206 /* set type, direction, address; reset fifo counters */ 207 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 208 tmp = usb_endpoint_type(desc); 209 if (usb_endpoint_xfer_bulk(desc)) { 210 /* catch some particularly blatant driver bugs */ 211 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) || 212 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { 213 spin_unlock_irqrestore(&dev->lock, flags); 214 return -ERANGE; 215 } 216 } 217 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 
1 : 0; 218 tmp <<= ENDPOINT_TYPE; 219 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER); 220 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION; 221 tmp |= (1 << ENDPOINT_ENABLE); 222 223 /* for OUT transfers, block the rx fifo until a read is posted */ 224 ep->is_in = usb_endpoint_dir_in(desc); 225 if (!ep->is_in) 226 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 227 228 net2272_ep_write(ep, EP_CFG, tmp); 229 230 /* enable irqs */ 231 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0); 232 net2272_write(dev, IRQENB0, tmp); 233 234 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 235 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 236 | net2272_ep_read(ep, EP_IRQENB); 237 net2272_ep_write(ep, EP_IRQENB, tmp); 238 239 tmp = desc->bEndpointAddress; 240 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n", 241 _ep->name, tmp & 0x0f, PIPEDIR(tmp), 242 type_string(desc->bmAttributes), max, 243 net2272_ep_read(ep, EP_CFG)); 244 245 spin_unlock_irqrestore(&dev->lock, flags); 246 return 0; 247 } 248 249 static void net2272_ep_reset(struct net2272_ep *ep) 250 { 251 u8 tmp; 252 253 ep->desc = NULL; 254 INIT_LIST_HEAD(&ep->queue); 255 256 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 257 ep->ep.ops = &net2272_ep_ops; 258 259 /* disable irqs, endpoint */ 260 net2272_ep_write(ep, EP_IRQENB, 0); 261 262 /* init to our chosen defaults, notably so that we NAK OUT 263 * packets until the driver queues a read. 264 */ 265 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS); 266 net2272_ep_write(ep, EP_RSPSET, tmp); 267 268 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE); 269 if (ep->num != 0) 270 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT); 271 272 net2272_ep_write(ep, EP_RSPCLR, tmp); 273 274 /* scrub most status bits, and flush any fifo state */ 275 net2272_ep_write(ep, EP_STAT0, 276 (1 << DATA_IN_TOKEN_INTERRUPT) 277 | (1 << DATA_OUT_TOKEN_INTERRUPT) 278 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) 279 | (1 << DATA_PACKET_RECEIVED_INTERRUPT) 280 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); 281 282 net2272_ep_write(ep, EP_STAT1, 283 (1 << TIMEOUT) 284 | (1 << USB_OUT_ACK_SENT) 285 | (1 << USB_OUT_NAK_SENT) 286 | (1 << USB_IN_ACK_RCVD) 287 | (1 << USB_IN_NAK_SENT) 288 | (1 << USB_STALL_SENT) 289 | (1 << LOCAL_OUT_ZLP) 290 | (1 << BUFFER_FLUSH)); 291 292 /* fifo size is handled seperately */ 293 } 294 295 static int net2272_disable(struct usb_ep *_ep) 296 { 297 struct net2272_ep *ep; 298 unsigned long flags; 299 300 ep = container_of(_ep, struct net2272_ep, ep); 301 if (!_ep || !ep->desc || _ep->name == ep0name) 302 return -EINVAL; 303 304 spin_lock_irqsave(&ep->dev->lock, flags); 305 net2272_dequeue_all(ep); 306 net2272_ep_reset(ep); 307 308 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name); 309 310 spin_unlock_irqrestore(&ep->dev->lock, flags); 311 return 0; 312 } 313 314 /*---------------------------------------------------------------------------*/ 315 316 static struct usb_request * 317 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 318 { 319 struct net2272_request *req; 320 321 if (!_ep) 322 return NULL; 323 324 req = kzalloc(sizeof(*req), gfp_flags); 325 if (!req) 326 return NULL; 327 328 INIT_LIST_HEAD(&req->queue); 329 330 return &req->req; 331 } 332 333 static void 334 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req) 335 { 336 struct net2272_request *req; 337 338 if (!_ep || !_req) 339 return; 340 341 req = container_of(_req, struct net2272_request, req); 342 WARN_ON(!list_empty(&req->queue)); 343 
kfree(req); 344 } 345 346 static void 347 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) 348 { 349 struct net2272 *dev; 350 unsigned stopped = ep->stopped; 351 352 if (ep->num == 0) { 353 if (ep->dev->protocol_stall) { 354 ep->stopped = 1; 355 set_halt(ep); 356 } 357 allow_status(ep); 358 } 359 360 list_del_init(&req->queue); 361 362 if (req->req.status == -EINPROGRESS) 363 req->req.status = status; 364 else 365 status = req->req.status; 366 367 dev = ep->dev; 368 if (use_dma && ep->dma) 369 usb_gadget_unmap_request(&dev->gadget, &req->req, 370 ep->is_in); 371 372 if (status && status != -ESHUTDOWN) 373 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n", 374 ep->ep.name, &req->req, status, 375 req->req.actual, req->req.length, req->req.buf); 376 377 /* don't modify queue heads during completion callback */ 378 ep->stopped = 1; 379 spin_unlock(&dev->lock); 380 usb_gadget_giveback_request(&ep->ep, &req->req); 381 spin_lock(&dev->lock); 382 ep->stopped = stopped; 383 } 384 385 static int 386 net2272_write_packet(struct net2272_ep *ep, u8 *buf, 387 struct net2272_request *req, unsigned max) 388 { 389 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); 390 u16 *bufp; 391 unsigned length, count; 392 u8 tmp; 393 394 length = min(req->req.length - req->req.actual, max); 395 req->req.actual += length; 396 397 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", 398 ep->ep.name, req, max, length, 399 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); 400 401 count = length; 402 bufp = (u16 *)buf; 403 404 while (likely(count >= 2)) { 405 /* no byte-swap required; chip endian set during init */ 406 writew(*bufp++, ep_data); 407 count -= 2; 408 } 409 buf = (u8 *)bufp; 410 411 /* write final byte by placing the NET2272 into 8-bit mode */ 412 if (unlikely(count)) { 413 tmp = net2272_read(ep->dev, LOCCTL); 414 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH)); 415 writeb(*buf, ep_data); 416 net2272_write(ep->dev, LOCCTL, tmp); 417 } 418 return length; 419 } 420 421 /* returns: 0: still running, 1: completed, negative: errno */ 422 static int 423 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) 424 { 425 u8 *buf; 426 unsigned count, max; 427 int status; 428 429 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n", 430 ep->ep.name, req->req.actual, req->req.length); 431 432 /* 433 * Keep loading the endpoint until the final packet is loaded, 434 * or the endpoint buffer is full. 
435 */ 436 top: 437 /* 438 * Clear interrupt status 439 * - Packet Transmitted interrupt will become set again when the 440 * host successfully takes another packet 441 */ 442 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); 443 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) { 444 buf = req->req.buf + req->req.actual; 445 prefetch(buf); 446 447 /* force pagesel */ 448 net2272_ep_read(ep, EP_STAT0); 449 450 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) | 451 (net2272_ep_read(ep, EP_AVAIL0)); 452 453 if (max < ep->ep.maxpacket) 454 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) 455 | (net2272_ep_read(ep, EP_AVAIL0)); 456 457 count = net2272_write_packet(ep, buf, req, max); 458 /* see if we are done */ 459 if (req->req.length == req->req.actual) { 460 /* validate short or zlp packet */ 461 if (count < ep->ep.maxpacket) 462 set_fifo_bytecount(ep, 0); 463 net2272_done(ep, req, 0); 464 465 if (!list_empty(&ep->queue)) { 466 req = list_entry(ep->queue.next, 467 struct net2272_request, 468 queue); 469 status = net2272_kick_dma(ep, req); 470 471 if (status < 0) 472 if ((net2272_ep_read(ep, EP_STAT0) 473 & (1 << BUFFER_EMPTY))) 474 goto top; 475 } 476 return 1; 477 } 478 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); 479 } 480 return 0; 481 } 482 483 static void 484 net2272_out_flush(struct net2272_ep *ep) 485 { 486 ASSERT_OUT_NAKING(ep); 487 488 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT) 489 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)); 490 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 491 } 492 493 static int 494 net2272_read_packet(struct net2272_ep *ep, u8 *buf, 495 struct net2272_request *req, unsigned avail) 496 { 497 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); 498 unsigned is_short; 499 u16 *bufp; 500 501 req->req.actual += avail; 502 503 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", 504 ep->ep.name, req, avail, 505 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); 506 507 is_short = (avail < ep->ep.maxpacket); 508 509 if (unlikely(avail == 0)) { 510 /* remove any zlp from the buffer */ 511 (void)readw(ep_data); 512 return is_short; 513 } 514 515 /* Ensure we get the final byte */ 516 if (unlikely(avail % 2)) 517 avail++; 518 bufp = (u16 *)buf; 519 520 do { 521 *bufp++ = readw(ep_data); 522 avail -= 2; 523 } while (avail); 524 525 /* 526 * To avoid false endpoint available race condition must read 527 * ep stat0 twice in the case of a short transfer 528 */ 529 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) 530 net2272_ep_read(ep, EP_STAT0); 531 532 return is_short; 533 } 534 535 static int 536 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) 537 { 538 u8 *buf; 539 unsigned is_short; 540 int count; 541 int tmp; 542 int cleanup = 0; 543 int status = -1; 544 545 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n", 546 ep->ep.name, req->req.actual, req->req.length); 547 548 top: 549 do { 550 buf = req->req.buf + req->req.actual; 551 prefetchw(buf); 552 553 count = (net2272_ep_read(ep, EP_AVAIL1) << 8) 554 | net2272_ep_read(ep, EP_AVAIL0); 555 556 net2272_ep_write(ep, EP_STAT0, 557 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | 558 (1 << DATA_PACKET_RECEIVED_INTERRUPT)); 559 560 tmp = req->req.length - req->req.actual; 561 562 if (count > tmp) { 563 if ((tmp % ep->ep.maxpacket) != 0) { 564 dev_err(ep->dev->dev, 565 "%s out fifo %d bytes, expected %d\n", 566 ep->ep.name, count, tmp); 567 cleanup = 1; 568 } 
569 count = (tmp > 0) ? tmp : 0; 570 } 571 572 is_short = net2272_read_packet(ep, buf, req, count); 573 574 /* completion */ 575 if (unlikely(cleanup || is_short || 576 ((req->req.actual == req->req.length) 577 && !req->req.zero))) { 578 579 if (cleanup) { 580 net2272_out_flush(ep); 581 net2272_done(ep, req, -EOVERFLOW); 582 } else 583 net2272_done(ep, req, 0); 584 585 /* re-initialize endpoint transfer registers 586 * otherwise they may result in erroneous pre-validation 587 * for subsequent control reads 588 */ 589 if (unlikely(ep->num == 0)) { 590 net2272_ep_write(ep, EP_TRANSFER2, 0); 591 net2272_ep_write(ep, EP_TRANSFER1, 0); 592 net2272_ep_write(ep, EP_TRANSFER0, 0); 593 } 594 595 if (!list_empty(&ep->queue)) { 596 req = list_entry(ep->queue.next, 597 struct net2272_request, queue); 598 status = net2272_kick_dma(ep, req); 599 if ((status < 0) && 600 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))) 601 goto top; 602 } 603 return 1; 604 } 605 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))); 606 607 return 0; 608 } 609 610 static void 611 net2272_pio_advance(struct net2272_ep *ep) 612 { 613 struct net2272_request *req; 614 615 if (unlikely(list_empty(&ep->queue))) 616 return; 617 618 req = list_entry(ep->queue.next, struct net2272_request, queue); 619 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); 620 } 621 622 /* returns 0 on success, else negative errno */ 623 static int 624 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf, 625 unsigned len, unsigned dir) 626 { 627 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n", 628 ep, buf, len, dir); 629 630 /* The NET2272 only supports a single dma channel */ 631 if (dev->dma_busy) 632 return -EBUSY; 633 /* 634 * EP_TRANSFER (used to determine the number of bytes received 635 * in an OUT transfer) is 24 bits wide; don't ask for more than that. 636 */ 637 if ((dir == 1) && (len > 0x1000000)) 638 return -EINVAL; 639 640 dev->dma_busy = 1; 641 642 /* initialize platform's dma */ 643 #ifdef CONFIG_USB_PCI 644 /* NET2272 addr, buffer addr, length, etc. 
*/ 645 switch (dev->dev_id) { 646 case PCI_DEVICE_ID_RDK1: 647 /* Setup PLX 9054 DMA mode */ 648 writel((1 << LOCAL_BUS_WIDTH) | 649 (1 << TA_READY_INPUT_ENABLE) | 650 (0 << LOCAL_BURST_ENABLE) | 651 (1 << DONE_INTERRUPT_ENABLE) | 652 (1 << LOCAL_ADDRESSING_MODE) | 653 (1 << DEMAND_MODE) | 654 (1 << DMA_EOT_ENABLE) | 655 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) | 656 (1 << DMA_CHANNEL_INTERRUPT_SELECT), 657 dev->rdk1.plx9054_base_addr + DMAMODE0); 658 659 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0); 660 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0); 661 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0); 662 writel((dir << DIRECTION_OF_TRANSFER) | 663 (1 << INTERRUPT_AFTER_TERMINAL_COUNT), 664 dev->rdk1.plx9054_base_addr + DMADPR0); 665 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) | 666 readl(dev->rdk1.plx9054_base_addr + INTCSR), 667 dev->rdk1.plx9054_base_addr + INTCSR); 668 669 break; 670 } 671 #endif 672 673 net2272_write(dev, DMAREQ, 674 (0 << DMA_BUFFER_VALID) | 675 (1 << DMA_REQUEST_ENABLE) | 676 (1 << DMA_CONTROL_DACK) | 677 (dev->dma_eot_polarity << EOT_POLARITY) | 678 (dev->dma_dack_polarity << DACK_POLARITY) | 679 (dev->dma_dreq_polarity << DREQ_POLARITY) | 680 ((ep >> 1) << DMA_ENDPOINT_SELECT)); 681 682 (void) net2272_read(dev, SCRATCH); 683 684 return 0; 685 } 686 687 static void 688 net2272_start_dma(struct net2272 *dev) 689 { 690 /* start platform's dma controller */ 691 #ifdef CONFIG_USB_PCI 692 switch (dev->dev_id) { 693 case PCI_DEVICE_ID_RDK1: 694 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START), 695 dev->rdk1.plx9054_base_addr + DMACSR0); 696 break; 697 } 698 #endif 699 } 700 701 /* returns 0 on success, else negative errno */ 702 static int 703 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) 704 { 705 unsigned size; 706 u8 tmp; 707 708 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma) 709 return -EINVAL; 710 711 /* don't use dma for odd-length transfers 712 * otherwise, we'd need to deal with the last byte with pio 713 */ 714 if (req->req.length & 1) 715 return -EINVAL; 716 717 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n", 718 ep->ep.name, req, (unsigned long long) req->req.dma); 719 720 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 721 722 /* The NET2272 can only use DMA on one endpoint at a time */ 723 if (ep->dev->dma_busy) 724 return -EBUSY; 725 726 /* Make sure we only DMA an even number of bytes (we'll use 727 * pio to complete the transfer) 728 */ 729 size = req->req.length; 730 size &= ~1; 731 732 /* device-to-host transfer */ 733 if (ep->is_in) { 734 /* initialize platform's dma controller */ 735 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) 736 /* unable to obtain DMA channel; return error and use pio mode */ 737 return -EBUSY; 738 req->req.actual += size; 739 740 /* host-to-device transfer */ 741 } else { 742 tmp = net2272_ep_read(ep, EP_STAT0); 743 744 /* initialize platform's dma controller */ 745 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) 746 /* unable to obtain DMA channel; return error and use pio mode */ 747 return -EBUSY; 748 749 if (!(tmp & (1 << BUFFER_EMPTY))) 750 ep->not_empty = 1; 751 else 752 ep->not_empty = 0; 753 754 755 /* allow the endpoint's buffer to fill */ 756 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 757 758 /* this transfer completed and data's already in the fifo 759 * return error so pio gets used. 
760 */ 761 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 762 763 /* deassert dreq */ 764 net2272_write(ep->dev, DMAREQ, 765 (0 << DMA_BUFFER_VALID) | 766 (0 << DMA_REQUEST_ENABLE) | 767 (1 << DMA_CONTROL_DACK) | 768 (ep->dev->dma_eot_polarity << EOT_POLARITY) | 769 (ep->dev->dma_dack_polarity << DACK_POLARITY) | 770 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) | 771 ((ep->num >> 1) << DMA_ENDPOINT_SELECT)); 772 773 return -EBUSY; 774 } 775 } 776 777 /* Don't use per-packet interrupts: use dma interrupts only */ 778 net2272_ep_write(ep, EP_IRQENB, 0); 779 780 net2272_start_dma(ep->dev); 781 782 return 0; 783 } 784 785 static void net2272_cancel_dma(struct net2272 *dev) 786 { 787 #ifdef CONFIG_USB_PCI 788 switch (dev->dev_id) { 789 case PCI_DEVICE_ID_RDK1: 790 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0); 791 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0); 792 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) & 793 (1 << CHANNEL_DONE))) 794 continue; /* wait for dma to stabalize */ 795 796 /* dma abort generates an interrupt */ 797 writeb(1 << CHANNEL_CLEAR_INTERRUPT, 798 dev->rdk1.plx9054_base_addr + DMACSR0); 799 break; 800 } 801 #endif 802 803 dev->dma_busy = 0; 804 } 805 806 /*---------------------------------------------------------------------------*/ 807 808 static int 809 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 810 { 811 struct net2272_request *req; 812 struct net2272_ep *ep; 813 struct net2272 *dev; 814 unsigned long flags; 815 int status = -1; 816 u8 s; 817 818 req = container_of(_req, struct net2272_request, req); 819 if (!_req || !_req->complete || !_req->buf 820 || !list_empty(&req->queue)) 821 return -EINVAL; 822 ep = container_of(_ep, struct net2272_ep, ep); 823 if (!_ep || (!ep->desc && ep->num != 0)) 824 return -EINVAL; 825 dev = ep->dev; 826 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 827 return -ESHUTDOWN; 828 829 /* set up dma mapping in case the caller didn't */ 830 if (use_dma && ep->dma) { 831 status = usb_gadget_map_request(&dev->gadget, _req, 832 ep->is_in); 833 if (status) 834 return status; 835 } 836 837 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n", 838 _ep->name, _req, _req->length, _req->buf, 839 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero"); 840 841 spin_lock_irqsave(&dev->lock, flags); 842 843 _req->status = -EINPROGRESS; 844 _req->actual = 0; 845 846 /* kickstart this i/o queue? */ 847 if (list_empty(&ep->queue) && !ep->stopped) { 848 /* maybe there's no control data, just status ack */ 849 if (ep->num == 0 && _req->length == 0) { 850 net2272_done(ep, req, 0); 851 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name); 852 goto done; 853 } 854 855 /* Return zlp, don't let it block subsequent packets */ 856 s = net2272_ep_read(ep, EP_STAT0); 857 if (s & (1 << BUFFER_EMPTY)) { 858 /* Buffer is empty check for a blocking zlp, handle it */ 859 if ((s & (1 << NAK_OUT_PACKETS)) && 860 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) { 861 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n"); 862 /* 863 * Request is going to terminate with a short packet ... 864 * hope the client is ready for it! 
865 */ 866 status = net2272_read_fifo(ep, req); 867 /* clear short packet naking */ 868 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS)); 869 goto done; 870 } 871 } 872 873 /* try dma first */ 874 status = net2272_kick_dma(ep, req); 875 876 if (status < 0) { 877 /* dma failed (most likely in use by another endpoint) 878 * fallback to pio 879 */ 880 status = 0; 881 882 if (ep->is_in) 883 status = net2272_write_fifo(ep, req); 884 else { 885 s = net2272_ep_read(ep, EP_STAT0); 886 if ((s & (1 << BUFFER_EMPTY)) == 0) 887 status = net2272_read_fifo(ep, req); 888 } 889 890 if (unlikely(status != 0)) { 891 if (status > 0) 892 status = 0; 893 req = NULL; 894 } 895 } 896 } 897 if (likely(req)) 898 list_add_tail(&req->queue, &ep->queue); 899 900 if (likely(!list_empty(&ep->queue))) 901 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 902 done: 903 spin_unlock_irqrestore(&dev->lock, flags); 904 905 return 0; 906 } 907 908 /* dequeue ALL requests */ 909 static void 910 net2272_dequeue_all(struct net2272_ep *ep) 911 { 912 struct net2272_request *req; 913 914 /* called with spinlock held */ 915 ep->stopped = 1; 916 917 while (!list_empty(&ep->queue)) { 918 req = list_entry(ep->queue.next, 919 struct net2272_request, 920 queue); 921 net2272_done(ep, req, -ESHUTDOWN); 922 } 923 } 924 925 /* dequeue JUST ONE request */ 926 static int 927 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) 928 { 929 struct net2272_ep *ep; 930 struct net2272_request *req; 931 unsigned long flags; 932 int stopped; 933 934 ep = container_of(_ep, struct net2272_ep, ep); 935 if (!_ep || (!ep->desc && ep->num != 0) || !_req) 936 return -EINVAL; 937 938 spin_lock_irqsave(&ep->dev->lock, flags); 939 stopped = ep->stopped; 940 ep->stopped = 1; 941 942 /* make sure it's still queued on this endpoint */ 943 list_for_each_entry(req, &ep->queue, queue) { 944 if (&req->req == _req) 945 break; 946 } 947 if (&req->req != _req) { 948 ep->stopped = stopped; 949 spin_unlock_irqrestore(&ep->dev->lock, flags); 950 return -EINVAL; 951 } 952 953 /* queue head may be partially complete */ 954 if (ep->queue.next == &req->queue) { 955 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name); 956 net2272_done(ep, req, -ECONNRESET); 957 } 958 req = NULL; 959 ep->stopped = stopped; 960 961 spin_unlock_irqrestore(&ep->dev->lock, flags); 962 return 0; 963 } 964 965 /*---------------------------------------------------------------------------*/ 966 967 static int 968 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) 969 { 970 struct net2272_ep *ep; 971 unsigned long flags; 972 int ret = 0; 973 974 ep = container_of(_ep, struct net2272_ep, ep); 975 if (!_ep || (!ep->desc && ep->num != 0)) 976 return -EINVAL; 977 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 978 return -ESHUTDOWN; 979 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc)) 980 return -EINVAL; 981 982 spin_lock_irqsave(&ep->dev->lock, flags); 983 if (!list_empty(&ep->queue)) 984 ret = -EAGAIN; 985 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) 986 ret = -EAGAIN; 987 else { 988 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name, 989 value ? "set" : "clear", 990 wedged ? 
"wedge" : "halt"); 991 /* set/clear */ 992 if (value) { 993 if (ep->num == 0) 994 ep->dev->protocol_stall = 1; 995 else 996 set_halt(ep); 997 if (wedged) 998 ep->wedged = 1; 999 } else { 1000 clear_halt(ep); 1001 ep->wedged = 0; 1002 } 1003 } 1004 spin_unlock_irqrestore(&ep->dev->lock, flags); 1005 1006 return ret; 1007 } 1008 1009 static int 1010 net2272_set_halt(struct usb_ep *_ep, int value) 1011 { 1012 return net2272_set_halt_and_wedge(_ep, value, 0); 1013 } 1014 1015 static int 1016 net2272_set_wedge(struct usb_ep *_ep) 1017 { 1018 if (!_ep || _ep->name == ep0name) 1019 return -EINVAL; 1020 return net2272_set_halt_and_wedge(_ep, 1, 1); 1021 } 1022 1023 static int 1024 net2272_fifo_status(struct usb_ep *_ep) 1025 { 1026 struct net2272_ep *ep; 1027 u16 avail; 1028 1029 ep = container_of(_ep, struct net2272_ep, ep); 1030 if (!_ep || (!ep->desc && ep->num != 0)) 1031 return -ENODEV; 1032 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1033 return -ESHUTDOWN; 1034 1035 avail = net2272_ep_read(ep, EP_AVAIL1) << 8; 1036 avail |= net2272_ep_read(ep, EP_AVAIL0); 1037 if (avail > ep->fifo_size) 1038 return -EOVERFLOW; 1039 if (ep->is_in) 1040 avail = ep->fifo_size - avail; 1041 return avail; 1042 } 1043 1044 static void 1045 net2272_fifo_flush(struct usb_ep *_ep) 1046 { 1047 struct net2272_ep *ep; 1048 1049 ep = container_of(_ep, struct net2272_ep, ep); 1050 if (!_ep || (!ep->desc && ep->num != 0)) 1051 return; 1052 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1053 return; 1054 1055 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 1056 } 1057 1058 static const struct usb_ep_ops net2272_ep_ops = { 1059 .enable = net2272_enable, 1060 .disable = net2272_disable, 1061 1062 .alloc_request = net2272_alloc_request, 1063 .free_request = net2272_free_request, 1064 1065 .queue = net2272_queue, 1066 .dequeue = net2272_dequeue, 1067 1068 .set_halt = net2272_set_halt, 1069 .set_wedge = net2272_set_wedge, 1070 .fifo_status = net2272_fifo_status, 1071 .fifo_flush = net2272_fifo_flush, 1072 }; 1073 1074 /*---------------------------------------------------------------------------*/ 1075 1076 static int 1077 net2272_get_frame(struct usb_gadget *_gadget) 1078 { 1079 struct net2272 *dev; 1080 unsigned long flags; 1081 u16 ret; 1082 1083 if (!_gadget) 1084 return -ENODEV; 1085 dev = container_of(_gadget, struct net2272, gadget); 1086 spin_lock_irqsave(&dev->lock, flags); 1087 1088 ret = net2272_read(dev, FRAME1) << 8; 1089 ret |= net2272_read(dev, FRAME0); 1090 1091 spin_unlock_irqrestore(&dev->lock, flags); 1092 return ret; 1093 } 1094 1095 static int 1096 net2272_wakeup(struct usb_gadget *_gadget) 1097 { 1098 struct net2272 *dev; 1099 u8 tmp; 1100 unsigned long flags; 1101 1102 if (!_gadget) 1103 return 0; 1104 dev = container_of(_gadget, struct net2272, gadget); 1105 1106 spin_lock_irqsave(&dev->lock, flags); 1107 tmp = net2272_read(dev, USBCTL0); 1108 if (tmp & (1 << IO_WAKEUP_ENABLE)) 1109 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME)); 1110 1111 spin_unlock_irqrestore(&dev->lock, flags); 1112 1113 return 0; 1114 } 1115 1116 static int 1117 net2272_set_selfpowered(struct usb_gadget *_gadget, int value) 1118 { 1119 if (!_gadget) 1120 return -ENODEV; 1121 1122 _gadget->is_selfpowered = (value != 0); 1123 1124 return 0; 1125 } 1126 1127 static int 1128 net2272_pullup(struct usb_gadget *_gadget, int is_on) 1129 { 1130 struct net2272 *dev; 1131 u8 tmp; 1132 unsigned long flags; 1133 1134 if (!_gadget) 1135 return -ENODEV; 1136 dev = container_of(_gadget, struct 
net2272, gadget); 1137 1138 spin_lock_irqsave(&dev->lock, flags); 1139 tmp = net2272_read(dev, USBCTL0); 1140 dev->softconnect = (is_on != 0); 1141 if (is_on) 1142 tmp |= (1 << USB_DETECT_ENABLE); 1143 else 1144 tmp &= ~(1 << USB_DETECT_ENABLE); 1145 net2272_write(dev, USBCTL0, tmp); 1146 spin_unlock_irqrestore(&dev->lock, flags); 1147 1148 return 0; 1149 } 1150 1151 static int net2272_start(struct usb_gadget *_gadget, 1152 struct usb_gadget_driver *driver); 1153 static int net2272_stop(struct usb_gadget *_gadget); 1154 1155 static const struct usb_gadget_ops net2272_ops = { 1156 .get_frame = net2272_get_frame, 1157 .wakeup = net2272_wakeup, 1158 .set_selfpowered = net2272_set_selfpowered, 1159 .pullup = net2272_pullup, 1160 .udc_start = net2272_start, 1161 .udc_stop = net2272_stop, 1162 }; 1163 1164 /*---------------------------------------------------------------------------*/ 1165 1166 static ssize_t 1167 registers_show(struct device *_dev, struct device_attribute *attr, char *buf) 1168 { 1169 struct net2272 *dev; 1170 char *next; 1171 unsigned size, t; 1172 unsigned long flags; 1173 u8 t1, t2; 1174 int i; 1175 const char *s; 1176 1177 dev = dev_get_drvdata(_dev); 1178 next = buf; 1179 size = PAGE_SIZE; 1180 spin_lock_irqsave(&dev->lock, flags); 1181 1182 if (dev->driver) 1183 s = dev->driver->driver.name; 1184 else 1185 s = "(none)"; 1186 1187 /* Main Control Registers */ 1188 t = scnprintf(next, size, "%s version %s," 1189 "chiprev %02x, locctl %02x\n" 1190 "irqenb0 %02x irqenb1 %02x " 1191 "irqstat0 %02x irqstat1 %02x\n", 1192 driver_name, driver_vers, dev->chiprev, 1193 net2272_read(dev, LOCCTL), 1194 net2272_read(dev, IRQENB0), 1195 net2272_read(dev, IRQENB1), 1196 net2272_read(dev, IRQSTAT0), 1197 net2272_read(dev, IRQSTAT1)); 1198 size -= t; 1199 next += t; 1200 1201 /* DMA */ 1202 t1 = net2272_read(dev, DMAREQ); 1203 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n", 1204 t1, ep_name[(t1 & 0x01) + 1], 1205 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "", 1206 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "", 1207 t1 & (1 << DMA_REQUEST) ? "req " : "", 1208 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : ""); 1209 size -= t; 1210 next += t; 1211 1212 /* USB Control Registers */ 1213 t1 = net2272_read(dev, USBCTL1); 1214 if (t1 & (1 << VBUS_PIN)) { 1215 if (t1 & (1 << USB_HIGH_SPEED)) 1216 s = "high speed"; 1217 else if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1218 s = "powered"; 1219 else 1220 s = "full speed"; 1221 } else 1222 s = "not attached"; 1223 t = scnprintf(next, size, 1224 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n", 1225 net2272_read(dev, USBCTL0), t1, 1226 net2272_read(dev, OURADDR), s); 1227 size -= t; 1228 next += t; 1229 1230 /* Endpoint Registers */ 1231 for (i = 0; i < 4; ++i) { 1232 struct net2272_ep *ep; 1233 1234 ep = &dev->ep[i]; 1235 if (i && !ep->desc) 1236 continue; 1237 1238 t1 = net2272_ep_read(ep, EP_CFG); 1239 t2 = net2272_ep_read(ep, EP_RSPSET); 1240 t = scnprintf(next, size, 1241 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s" 1242 "irqenb %02x\n", 1243 ep->ep.name, t1, t2, 1244 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "", 1245 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "", 1246 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "", 1247 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "", 1248 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "", 1249 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "", 1250 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ", 1251 (t2 & (1 << ENDPOINT_HALT)) ? 
"HALT " : "", 1252 net2272_ep_read(ep, EP_IRQENB)); 1253 size -= t; 1254 next += t; 1255 1256 t = scnprintf(next, size, 1257 "\tstat0 %02x stat1 %02x avail %04x " 1258 "(ep%d%s-%s)%s\n", 1259 net2272_ep_read(ep, EP_STAT0), 1260 net2272_ep_read(ep, EP_STAT1), 1261 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), 1262 t1 & 0x0f, 1263 ep->is_in ? "in" : "out", 1264 type_string(t1 >> 5), 1265 ep->stopped ? "*" : ""); 1266 size -= t; 1267 next += t; 1268 1269 t = scnprintf(next, size, 1270 "\tep_transfer %06x\n", 1271 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | 1272 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | 1273 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); 1274 size -= t; 1275 next += t; 1276 1277 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; 1278 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; 1279 t = scnprintf(next, size, 1280 "\tbuf-a %s buf-b %s\n", 1281 buf_state_string(t1), 1282 buf_state_string(t2)); 1283 size -= t; 1284 next += t; 1285 } 1286 1287 spin_unlock_irqrestore(&dev->lock, flags); 1288 1289 return PAGE_SIZE - size; 1290 } 1291 static DEVICE_ATTR_RO(registers); 1292 1293 /*---------------------------------------------------------------------------*/ 1294 1295 static void 1296 net2272_set_fifo_mode(struct net2272 *dev, int mode) 1297 { 1298 u8 tmp; 1299 1300 tmp = net2272_read(dev, LOCCTL) & 0x3f; 1301 tmp |= (mode << 6); 1302 net2272_write(dev, LOCCTL, tmp); 1303 1304 INIT_LIST_HEAD(&dev->gadget.ep_list); 1305 1306 /* always ep-a, ep-c ... maybe not ep-b */ 1307 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1308 1309 switch (mode) { 1310 case 0: 1311 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1312 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; 1313 break; 1314 case 1: 1315 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1316 dev->ep[1].fifo_size = 1024; 1317 dev->ep[2].fifo_size = 512; 1318 break; 1319 case 2: 1320 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1321 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1322 break; 1323 case 3: 1324 dev->ep[1].fifo_size = 1024; 1325 break; 1326 } 1327 1328 /* ep-c is always 2 512 byte buffers */ 1329 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1330 dev->ep[3].fifo_size = 512; 1331 } 1332 1333 /*---------------------------------------------------------------------------*/ 1334 1335 static void 1336 net2272_usb_reset(struct net2272 *dev) 1337 { 1338 dev->gadget.speed = USB_SPEED_UNKNOWN; 1339 1340 net2272_cancel_dma(dev); 1341 1342 net2272_write(dev, IRQENB0, 0); 1343 net2272_write(dev, IRQENB1, 0); 1344 1345 /* clear irq state */ 1346 net2272_write(dev, IRQSTAT0, 0xff); 1347 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT)); 1348 1349 net2272_write(dev, DMAREQ, 1350 (0 << DMA_BUFFER_VALID) | 1351 (0 << DMA_REQUEST_ENABLE) | 1352 (1 << DMA_CONTROL_DACK) | 1353 (dev->dma_eot_polarity << EOT_POLARITY) | 1354 (dev->dma_dack_polarity << DACK_POLARITY) | 1355 (dev->dma_dreq_polarity << DREQ_POLARITY) | 1356 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT)); 1357 1358 net2272_cancel_dma(dev); 1359 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0); 1360 1361 /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping 1362 * note that the higher level gadget drivers are expected to convert data to little endian. 
1363 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here 1364 */ 1365 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH)); 1366 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE)); 1367 } 1368 1369 static void 1370 net2272_usb_reinit(struct net2272 *dev) 1371 { 1372 int i; 1373 1374 /* basic endpoint init */ 1375 for (i = 0; i < 4; ++i) { 1376 struct net2272_ep *ep = &dev->ep[i]; 1377 1378 ep->ep.name = ep_name[i]; 1379 ep->dev = dev; 1380 ep->num = i; 1381 ep->not_empty = 0; 1382 1383 if (use_dma && ep->num == dma_ep) 1384 ep->dma = 1; 1385 1386 if (i > 0 && i <= 3) 1387 ep->fifo_size = 512; 1388 else 1389 ep->fifo_size = 64; 1390 net2272_ep_reset(ep); 1391 1392 if (i == 0) { 1393 ep->ep.caps.type_control = true; 1394 } else { 1395 ep->ep.caps.type_iso = true; 1396 ep->ep.caps.type_bulk = true; 1397 ep->ep.caps.type_int = true; 1398 } 1399 1400 ep->ep.caps.dir_in = true; 1401 ep->ep.caps.dir_out = true; 1402 } 1403 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 1404 1405 dev->gadget.ep0 = &dev->ep[0].ep; 1406 dev->ep[0].stopped = 0; 1407 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 1408 } 1409 1410 static void 1411 net2272_ep0_start(struct net2272 *dev) 1412 { 1413 struct net2272_ep *ep0 = &dev->ep[0]; 1414 1415 net2272_ep_write(ep0, EP_RSPSET, 1416 (1 << NAK_OUT_PACKETS_MODE) | 1417 (1 << ALT_NAK_OUT_PACKETS)); 1418 net2272_ep_write(ep0, EP_RSPCLR, 1419 (1 << HIDE_STATUS_PHASE) | 1420 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)); 1421 net2272_write(dev, USBCTL0, 1422 (dev->softconnect << USB_DETECT_ENABLE) | 1423 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) | 1424 (1 << IO_WAKEUP_ENABLE)); 1425 net2272_write(dev, IRQENB0, 1426 (1 << SETUP_PACKET_INTERRUPT_ENABLE) | 1427 (1 << ENDPOINT_0_INTERRUPT_ENABLE) | 1428 (1 << DMA_DONE_INTERRUPT_ENABLE)); 1429 net2272_write(dev, IRQENB1, 1430 (1 << VBUS_INTERRUPT_ENABLE) | 1431 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | 1432 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)); 1433 } 1434 1435 /* when a driver is successfully registered, it will receive 1436 * control requests including set_configuration(), which enables 1437 * non-control requests. then usb traffic follows until a 1438 * disconnect is reported. then a host may connect again, or 1439 * the driver might get unbound. 1440 */ 1441 static int net2272_start(struct usb_gadget *_gadget, 1442 struct usb_gadget_driver *driver) 1443 { 1444 struct net2272 *dev; 1445 unsigned i; 1446 1447 if (!driver || !driver->setup || 1448 driver->max_speed != USB_SPEED_HIGH) 1449 return -EINVAL; 1450 1451 dev = container_of(_gadget, struct net2272, gadget); 1452 1453 for (i = 0; i < 4; ++i) 1454 dev->ep[i].irqs = 0; 1455 /* hook up the driver ... */ 1456 dev->softconnect = 1; 1457 driver->driver.bus = NULL; 1458 dev->driver = driver; 1459 1460 /* ... then enable host detection and ep0; and we're ready 1461 * for set_configuration as well as eventual disconnect. 1462 */ 1463 net2272_ep0_start(dev); 1464 1465 return 0; 1466 } 1467 1468 static void 1469 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) 1470 { 1471 int i; 1472 1473 /* don't disconnect if it's not connected */ 1474 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1475 driver = NULL; 1476 1477 /* stop hardware; prevent new request submissions; 1478 * and kill any outstanding requests. 
1479 */ 1480 net2272_usb_reset(dev); 1481 for (i = 0; i < 4; ++i) 1482 net2272_dequeue_all(&dev->ep[i]); 1483 1484 /* report disconnect; the driver is already quiesced */ 1485 if (driver) { 1486 spin_unlock(&dev->lock); 1487 driver->disconnect(&dev->gadget); 1488 spin_lock(&dev->lock); 1489 } 1490 1491 net2272_usb_reinit(dev); 1492 } 1493 1494 static int net2272_stop(struct usb_gadget *_gadget) 1495 { 1496 struct net2272 *dev; 1497 unsigned long flags; 1498 1499 dev = container_of(_gadget, struct net2272, gadget); 1500 1501 spin_lock_irqsave(&dev->lock, flags); 1502 stop_activity(dev, NULL); 1503 spin_unlock_irqrestore(&dev->lock, flags); 1504 1505 dev->driver = NULL; 1506 1507 return 0; 1508 } 1509 1510 /*---------------------------------------------------------------------------*/ 1511 /* handle ep-a/ep-b dma completions */ 1512 static void 1513 net2272_handle_dma(struct net2272_ep *ep) 1514 { 1515 struct net2272_request *req; 1516 unsigned len; 1517 int status; 1518 1519 if (!list_empty(&ep->queue)) 1520 req = list_entry(ep->queue.next, 1521 struct net2272_request, queue); 1522 else 1523 req = NULL; 1524 1525 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); 1526 1527 /* Ensure DREQ is de-asserted */ 1528 net2272_write(ep->dev, DMAREQ, 1529 (0 << DMA_BUFFER_VALID) 1530 | (0 << DMA_REQUEST_ENABLE) 1531 | (1 << DMA_CONTROL_DACK) 1532 | (ep->dev->dma_eot_polarity << EOT_POLARITY) 1533 | (ep->dev->dma_dack_polarity << DACK_POLARITY) 1534 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY) 1535 | (ep->dma << DMA_ENDPOINT_SELECT)); 1536 1537 ep->dev->dma_busy = 0; 1538 1539 net2272_ep_write(ep, EP_IRQENB, 1540 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 1541 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 1542 | net2272_ep_read(ep, EP_IRQENB)); 1543 1544 /* device-to-host transfer completed */ 1545 if (ep->is_in) { 1546 /* validate a short packet or zlp if necessary */ 1547 if ((req->req.length % ep->ep.maxpacket != 0) || 1548 req->req.zero) 1549 set_fifo_bytecount(ep, 0); 1550 1551 net2272_done(ep, req, 0); 1552 if (!list_empty(&ep->queue)) { 1553 req = list_entry(ep->queue.next, 1554 struct net2272_request, queue); 1555 status = net2272_kick_dma(ep, req); 1556 if (status < 0) 1557 net2272_pio_advance(ep); 1558 } 1559 1560 /* host-to-device transfer completed */ 1561 } else { 1562 /* terminated with a short packet? */ 1563 if (net2272_read(ep->dev, IRQSTAT0) & 1564 (1 << DMA_DONE_INTERRUPT)) { 1565 /* abort system dma */ 1566 net2272_cancel_dma(ep->dev); 1567 } 1568 1569 /* EP_TRANSFER will contain the number of bytes 1570 * actually received. 1571 * NOTE: There is no overflow detection on EP_TRANSFER: 1572 * We can't deal with transfers larger than 2^24 bytes! 
1573 */ 1574 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16) 1575 | (net2272_ep_read(ep, EP_TRANSFER1) << 8) 1576 | (net2272_ep_read(ep, EP_TRANSFER0)); 1577 1578 if (ep->not_empty) 1579 len += 4; 1580 1581 req->req.actual += len; 1582 1583 /* get any remaining data */ 1584 net2272_pio_advance(ep); 1585 } 1586 } 1587 1588 /*---------------------------------------------------------------------------*/ 1589 1590 static void 1591 net2272_handle_ep(struct net2272_ep *ep) 1592 { 1593 struct net2272_request *req; 1594 u8 stat0, stat1; 1595 1596 if (!list_empty(&ep->queue)) 1597 req = list_entry(ep->queue.next, 1598 struct net2272_request, queue); 1599 else 1600 req = NULL; 1601 1602 /* ack all, and handle what we care about */ 1603 stat0 = net2272_ep_read(ep, EP_STAT0); 1604 stat1 = net2272_ep_read(ep, EP_STAT1); 1605 ep->irqs++; 1606 1607 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", 1608 ep->ep.name, stat0, stat1, req ? &req->req : NULL); 1609 1610 net2272_ep_write(ep, EP_STAT0, stat0 & 1611 ~((1 << NAK_OUT_PACKETS) 1612 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))); 1613 net2272_ep_write(ep, EP_STAT1, stat1); 1614 1615 /* data packet(s) received (in the fifo, OUT) 1616 * direction must be validated, otherwise control read status phase 1617 * could be interpreted as a valid packet 1618 */ 1619 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT))) 1620 net2272_pio_advance(ep); 1621 /* data packet(s) transmitted (IN) */ 1622 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) 1623 net2272_pio_advance(ep); 1624 } 1625 1626 static struct net2272_ep * 1627 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex) 1628 { 1629 struct net2272_ep *ep; 1630 1631 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 1632 return &dev->ep[0]; 1633 1634 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { 1635 u8 bEndpointAddress; 1636 1637 if (!ep->desc) 1638 continue; 1639 bEndpointAddress = ep->desc->bEndpointAddress; 1640 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 1641 continue; 1642 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) 1643 return ep; 1644 } 1645 return NULL; 1646 } 1647 1648 /* 1649 * USB Test Packet: 1650 * JKJKJKJK * 9 1651 * JJKKJJKK * 8 1652 * JJJJKKKK * 8 1653 * JJJJJJJKKKKKKK * 8 1654 * JJJJJJJK * 8 1655 * {JKKKKKKK * 10}, JK 1656 */ 1657 static const u8 net2272_test_packet[] = { 1658 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1659 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 1660 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 1661 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 1662 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 1663 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E 1664 }; 1665 1666 static void 1667 net2272_set_test_mode(struct net2272 *dev, int mode) 1668 { 1669 int i; 1670 1671 /* Disable all net2272 interrupts: 1672 * Nothing but a power cycle should stop the test. 
1673 */ 1674 net2272_write(dev, IRQENB0, 0x00); 1675 net2272_write(dev, IRQENB1, 0x00); 1676 1677 /* Force tranceiver to high-speed */ 1678 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED); 1679 1680 net2272_write(dev, PAGESEL, 0); 1681 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT); 1682 net2272_write(dev, EP_RSPCLR, 1683 (1 << CONTROL_STATUS_PHASE_HANDSHAKE) 1684 | (1 << HIDE_STATUS_PHASE)); 1685 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION); 1686 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH); 1687 1688 /* wait for status phase to complete */ 1689 while (!(net2272_read(dev, EP_STAT0) & 1690 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))) 1691 ; 1692 1693 /* Enable test mode */ 1694 net2272_write(dev, USBTEST, mode); 1695 1696 /* load test packet */ 1697 if (mode == TEST_PACKET) { 1698 /* switch to 8 bit mode */ 1699 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) & 1700 ~(1 << DATA_WIDTH)); 1701 1702 for (i = 0; i < sizeof(net2272_test_packet); ++i) 1703 net2272_write(dev, EP_DATA, net2272_test_packet[i]); 1704 1705 /* Validate test packet */ 1706 net2272_write(dev, EP_TRANSFER0, 0); 1707 } 1708 } 1709 1710 static void 1711 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat) 1712 { 1713 struct net2272_ep *ep; 1714 u8 num, scratch; 1715 1716 /* starting a control request? */ 1717 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) { 1718 union { 1719 u8 raw[8]; 1720 struct usb_ctrlrequest r; 1721 } u; 1722 int tmp = 0; 1723 struct net2272_request *req; 1724 1725 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { 1726 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED)) 1727 dev->gadget.speed = USB_SPEED_HIGH; 1728 else 1729 dev->gadget.speed = USB_SPEED_FULL; 1730 dev_dbg(dev->dev, "%s\n", 1731 usb_speed_string(dev->gadget.speed)); 1732 } 1733 1734 ep = &dev->ep[0]; 1735 ep->irqs++; 1736 1737 /* make sure any leftover interrupt state is cleared */ 1738 stat &= ~(1 << ENDPOINT_0_INTERRUPT); 1739 while (!list_empty(&ep->queue)) { 1740 req = list_entry(ep->queue.next, 1741 struct net2272_request, queue); 1742 net2272_done(ep, req, 1743 (req->req.actual == req->req.length) ? 0 : -EPROTO); 1744 } 1745 ep->stopped = 0; 1746 dev->protocol_stall = 0; 1747 net2272_ep_write(ep, EP_STAT0, 1748 (1 << DATA_IN_TOKEN_INTERRUPT) 1749 | (1 << DATA_OUT_TOKEN_INTERRUPT) 1750 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) 1751 | (1 << DATA_PACKET_RECEIVED_INTERRUPT) 1752 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); 1753 net2272_ep_write(ep, EP_STAT1, 1754 (1 << TIMEOUT) 1755 | (1 << USB_OUT_ACK_SENT) 1756 | (1 << USB_OUT_NAK_SENT) 1757 | (1 << USB_IN_ACK_RCVD) 1758 | (1 << USB_IN_NAK_SENT) 1759 | (1 << USB_STALL_SENT) 1760 | (1 << LOCAL_OUT_ZLP)); 1761 1762 /* 1763 * Ensure Control Read pre-validation setting is beyond maximum size 1764 * - Control Writes can leave non-zero values in EP_TRANSFER. If 1765 * an EP0 transfer following the Control Write is a Control Read, 1766 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected 1767 * pre-validation count. 
1768 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures 1769 * the pre-validation count cannot cause an unexpected validatation 1770 */ 1771 net2272_write(dev, PAGESEL, 0); 1772 net2272_write(dev, EP_TRANSFER2, 0xff); 1773 net2272_write(dev, EP_TRANSFER1, 0xff); 1774 net2272_write(dev, EP_TRANSFER0, 0xff); 1775 1776 u.raw[0] = net2272_read(dev, SETUP0); 1777 u.raw[1] = net2272_read(dev, SETUP1); 1778 u.raw[2] = net2272_read(dev, SETUP2); 1779 u.raw[3] = net2272_read(dev, SETUP3); 1780 u.raw[4] = net2272_read(dev, SETUP4); 1781 u.raw[5] = net2272_read(dev, SETUP5); 1782 u.raw[6] = net2272_read(dev, SETUP6); 1783 u.raw[7] = net2272_read(dev, SETUP7); 1784 /* 1785 * If you have a big endian cpu make sure le16_to_cpus 1786 * performs the proper byte swapping here... 1787 */ 1788 le16_to_cpus(&u.r.wValue); 1789 le16_to_cpus(&u.r.wIndex); 1790 le16_to_cpus(&u.r.wLength); 1791 1792 /* ack the irq */ 1793 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT); 1794 stat ^= (1 << SETUP_PACKET_INTERRUPT); 1795 1796 /* watch control traffic at the token level, and force 1797 * synchronization before letting the status phase happen. 1798 */ 1799 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; 1800 if (ep->is_in) { 1801 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 1802 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) 1803 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); 1804 stop_out_naking(ep); 1805 } else 1806 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 1807 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) 1808 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); 1809 net2272_ep_write(ep, EP_IRQENB, scratch); 1810 1811 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) 1812 goto delegate; 1813 switch (u.r.bRequest) { 1814 case USB_REQ_GET_STATUS: { 1815 struct net2272_ep *e; 1816 u16 status = 0; 1817 1818 switch (u.r.bRequestType & USB_RECIP_MASK) { 1819 case USB_RECIP_ENDPOINT: 1820 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1821 if (!e || u.r.wLength > 2) 1822 goto do_stall; 1823 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT)) 1824 status = cpu_to_le16(1); 1825 else 1826 status = cpu_to_le16(0); 1827 1828 /* don't bother with a request object! */ 1829 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1830 writew(status, net2272_reg_addr(dev, EP_DATA)); 1831 set_fifo_bytecount(&dev->ep[0], 0); 1832 allow_status(ep); 1833 dev_vdbg(dev->dev, "%s stat %02x\n", 1834 ep->ep.name, status); 1835 goto next_endpoints; 1836 case USB_RECIP_DEVICE: 1837 if (u.r.wLength > 2) 1838 goto do_stall; 1839 if (dev->gadget.is_selfpowered) 1840 status = (1 << USB_DEVICE_SELF_POWERED); 1841 1842 /* don't bother with a request object! */ 1843 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1844 writew(status, net2272_reg_addr(dev, EP_DATA)); 1845 set_fifo_bytecount(&dev->ep[0], 0); 1846 allow_status(ep); 1847 dev_vdbg(dev->dev, "device stat %02x\n", status); 1848 goto next_endpoints; 1849 case USB_RECIP_INTERFACE: 1850 if (u.r.wLength > 2) 1851 goto do_stall; 1852 1853 /* don't bother with a request object! 
*/ 1854 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1855 writew(status, net2272_reg_addr(dev, EP_DATA)); 1856 set_fifo_bytecount(&dev->ep[0], 0); 1857 allow_status(ep); 1858 dev_vdbg(dev->dev, "interface status %02x\n", status); 1859 goto next_endpoints; 1860 } 1861 1862 break; 1863 } 1864 case USB_REQ_CLEAR_FEATURE: { 1865 struct net2272_ep *e; 1866 1867 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 1868 goto delegate; 1869 if (u.r.wValue != USB_ENDPOINT_HALT || 1870 u.r.wLength != 0) 1871 goto do_stall; 1872 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1873 if (!e) 1874 goto do_stall; 1875 if (e->wedged) { 1876 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n", 1877 ep->ep.name); 1878 } else { 1879 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name); 1880 clear_halt(e); 1881 } 1882 allow_status(ep); 1883 goto next_endpoints; 1884 } 1885 case USB_REQ_SET_FEATURE: { 1886 struct net2272_ep *e; 1887 1888 if (u.r.bRequestType == USB_RECIP_DEVICE) { 1889 if (u.r.wIndex != NORMAL_OPERATION) 1890 net2272_set_test_mode(dev, (u.r.wIndex >> 8)); 1891 allow_status(ep); 1892 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex); 1893 goto next_endpoints; 1894 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT) 1895 goto delegate; 1896 if (u.r.wValue != USB_ENDPOINT_HALT || 1897 u.r.wLength != 0) 1898 goto do_stall; 1899 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1900 if (!e) 1901 goto do_stall; 1902 set_halt(e); 1903 allow_status(ep); 1904 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name); 1905 goto next_endpoints; 1906 } 1907 case USB_REQ_SET_ADDRESS: { 1908 net2272_write(dev, OURADDR, u.r.wValue & 0xff); 1909 allow_status(ep); 1910 break; 1911 } 1912 default: 1913 delegate: 1914 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x " 1915 "ep_cfg %08x\n", 1916 u.r.bRequestType, u.r.bRequest, 1917 u.r.wValue, u.r.wIndex, 1918 net2272_ep_read(ep, EP_CFG)); 1919 spin_unlock(&dev->lock); 1920 tmp = dev->driver->setup(&dev->gadget, &u.r); 1921 spin_lock(&dev->lock); 1922 } 1923 1924 /* stall ep0 on error */ 1925 if (tmp < 0) { 1926 do_stall: 1927 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n", 1928 u.r.bRequestType, u.r.bRequest, tmp); 1929 dev->protocol_stall = 1; 1930 } 1931 /* endpoint dma irq? */ 1932 } else if (stat & (1 << DMA_DONE_INTERRUPT)) { 1933 net2272_cancel_dma(dev); 1934 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT); 1935 stat &= ~(1 << DMA_DONE_INTERRUPT); 1936 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT)) 1937 ? 2 : 1; 1938 1939 ep = &dev->ep[num]; 1940 net2272_handle_dma(ep); 1941 } 1942 1943 next_endpoints: 1944 /* endpoint data irq? */ 1945 scratch = stat & 0x0f; 1946 stat &= ~0x0f; 1947 for (num = 0; scratch; num++) { 1948 u8 t; 1949 1950 /* does this endpoint's FIFO and queue need tending? */ 1951 t = 1 << num; 1952 if ((scratch & t) == 0) 1953 continue; 1954 scratch ^= t; 1955 1956 ep = &dev->ep[num]; 1957 net2272_handle_ep(ep); 1958 } 1959 1960 /* some interrupts we can just ignore */ 1961 stat &= ~(1 << SOF_INTERRUPT); 1962 1963 if (stat) 1964 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat); 1965 } 1966 1967 static void 1968 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat) 1969 { 1970 u8 tmp, mask; 1971 1972 /* after disconnect there's nothing else to do! 
*/ 1973 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); 1974 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED); 1975 1976 if (stat & tmp) { 1977 bool reset = false; 1978 bool disconnect = false; 1979 1980 /* 1981 * Ignore disconnects and resets if the speed hasn't been set. 1982 * VBUS can bounce and there's always an initial reset. 1983 */ 1984 net2272_write(dev, IRQSTAT1, tmp); 1985 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 1986 if ((stat & (1 << VBUS_INTERRUPT)) && 1987 (net2272_read(dev, USBCTL1) & 1988 (1 << VBUS_PIN)) == 0) { 1989 disconnect = true; 1990 dev_dbg(dev->dev, "disconnect %s\n", 1991 dev->driver->driver.name); 1992 } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) && 1993 (net2272_read(dev, USBCTL1) & mask) 1994 == 0) { 1995 reset = true; 1996 dev_dbg(dev->dev, "reset %s\n", 1997 dev->driver->driver.name); 1998 } 1999 2000 if (disconnect || reset) { 2001 stop_activity(dev, dev->driver); 2002 net2272_ep0_start(dev); 2003 spin_unlock(&dev->lock); 2004 if (reset) 2005 usb_gadget_udc_reset 2006 (&dev->gadget, dev->driver); 2007 else 2008 (dev->driver->disconnect) 2009 (&dev->gadget); 2010 spin_lock(&dev->lock); 2011 return; 2012 } 2013 } 2014 stat &= ~tmp; 2015 2016 if (!stat) 2017 return; 2018 } 2019 2020 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT); 2021 if (stat & tmp) { 2022 net2272_write(dev, IRQSTAT1, tmp); 2023 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) { 2024 if (dev->driver->suspend) 2025 dev->driver->suspend(&dev->gadget); 2026 if (!enable_suspend) { 2027 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT); 2028 dev_dbg(dev->dev, "Suspend disabled, ignoring\n"); 2029 } 2030 } else { 2031 if (dev->driver->resume) 2032 dev->driver->resume(&dev->gadget); 2033 } 2034 stat &= ~tmp; 2035 } 2036 2037 /* clear any other status/irqs */ 2038 if (stat) 2039 net2272_write(dev, IRQSTAT1, stat); 2040 2041 /* some status we can just ignore */ 2042 stat &= ~((1 << CONTROL_STATUS_INTERRUPT) 2043 | (1 << SUSPEND_REQUEST_INTERRUPT) 2044 | (1 << RESUME_INTERRUPT)); 2045 if (!stat) 2046 return; 2047 else 2048 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat); 2049 } 2050 2051 static irqreturn_t net2272_irq(int irq, void *_dev) 2052 { 2053 struct net2272 *dev = _dev; 2054 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2) 2055 u32 intcsr; 2056 #endif 2057 #if defined(PLX_PCI_RDK) 2058 u8 dmareq; 2059 #endif 2060 spin_lock(&dev->lock); 2061 #if defined(PLX_PCI_RDK) 2062 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); 2063 2064 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) { 2065 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE), 2066 dev->rdk1.plx9054_base_addr + INTCSR); 2067 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1)); 2068 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0)); 2069 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); 2070 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE), 2071 dev->rdk1.plx9054_base_addr + INTCSR); 2072 } 2073 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) { 2074 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2075 dev->rdk1.plx9054_base_addr + DMACSR0); 2076 2077 dmareq = net2272_read(dev, DMAREQ); 2078 if (dmareq & 0x01) 2079 net2272_handle_dma(&dev->ep[2]); 2080 else 2081 net2272_handle_dma(&dev->ep[1]); 2082 } 2083 #endif 2084 #if defined(PLX_PCI_RDK2) 2085 /* see if PCI int for us by checking irqstat */ 2086 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2087 if (!(intcsr & (1 << NET2272_PCI_IRQ))) { 2088 spin_unlock(&dev->lock); 2089 return IRQ_NONE; 2090 } 
2091 /* check dma interrupts */
2092 #endif
2093 /* Platform/device interrupt handler */
2094 #if !defined(PLX_PCI_RDK)
2095 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2096 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2097 #endif
2098 spin_unlock(&dev->lock);
2099
2100 return IRQ_HANDLED;
2101 }
2102
2103 static int net2272_present(struct net2272 *dev)
2104 {
2105 /*
2106 * Quick test to see if the CPU can communicate properly with the NET2272.
2107 * Verifies connection using writes and reads to write/read and
2108 * read-only registers.
2109 *
2110 * This routine is strongly recommended, especially during early bring-up
2111 * of new hardware; for designs that do not apply Power-On Self-Tests
2112 * (POST) it may be discarded (or perhaps minimized).
2113 */
2114 unsigned int ii;
2115 u8 val, refval;
2116
2117 /* Verify the NET2272 SCRATCH register can be written and read */
2118 refval = net2272_read(dev, SCRATCH);
2119 for (ii = 0; ii < 0x100; ii += 7) {
2120 net2272_write(dev, SCRATCH, ii);
2121 val = net2272_read(dev, SCRATCH);
2122 if (val != ii) {
2123 dev_dbg(dev->dev,
2124 "%s: write/read SCRATCH register test failed: "
2125 "wrote:0x%2.2x, read:0x%2.2x\n",
2126 __func__, ii, val);
2127 return -EINVAL;
2128 }
2129 }
2130 /* To be nice, we write the original SCRATCH value back: */
2131 net2272_write(dev, SCRATCH, refval);
2132
2133 /* Verify NET2272 CHIPREV register is read-only: */
2134 refval = net2272_read(dev, CHIPREV_2272);
2135 for (ii = 0; ii < 0x100; ii += 7) {
2136 net2272_write(dev, CHIPREV_2272, ii);
2137 val = net2272_read(dev, CHIPREV_2272);
2138 if (val != refval) {
2139 dev_dbg(dev->dev,
2140 "%s: write/read CHIPREV register test failed: "
2141 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2142 __func__, ii, val, refval);
2143 return -EINVAL;
2144 }
2145 }
2146
2147 /*
2148 * Verify NET2272's "NET2270 legacy revision" register
2149 * - NET2272 has two revision registers. The NET2270 legacy revision
2150 * register should read the same value, regardless of the NET2272
2151 * silicon revision. The legacy register applies to NET2270
2152 * firmware being applied to the NET2272.
2153 */
2154 val = net2272_read(dev, CHIPREV_LEGACY);
2155 if (val != NET2270_LEGACY_REV) {
2156 /*
2157 * Unexpected legacy revision value
2158 * - Perhaps the chip is a NET2270?
2159 */
2160 dev_dbg(dev->dev,
2161 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2162 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x.
(Not NET2272?)\n", 2163 __func__, NET2270_LEGACY_REV, val); 2164 return -EINVAL; 2165 } 2166 2167 /* 2168 * Verify NET2272 silicon revision 2169 * - This revision register is appropriate for the silicon version 2170 * of the NET2272 2171 */ 2172 val = net2272_read(dev, CHIPREV_2272); 2173 switch (val) { 2174 case CHIPREV_NET2272_R1: 2175 /* 2176 * NET2272 Rev 1 has DMA related errata: 2177 * - Newer silicon (Rev 1A or better) required 2178 */ 2179 dev_dbg(dev->dev, 2180 "%s: Rev 1 detected: newer silicon recommended for DMA support\n", 2181 __func__); 2182 break; 2183 case CHIPREV_NET2272_R1A: 2184 break; 2185 default: 2186 /* NET2272 silicon version *may* not work with this firmware */ 2187 dev_dbg(dev->dev, 2188 "%s: unexpected silicon revision register value: " 2189 " CHIPREV_2272: 0x%2.2x\n", 2190 __func__, val); 2191 /* 2192 * Return Success, even though the chip rev is not an expected value 2193 * - Older, pre-built firmware can attempt to operate on newer silicon 2194 * - Often, new silicon is perfectly compatible 2195 */ 2196 } 2197 2198 /* Success: NET2272 checks out OK */ 2199 return 0; 2200 } 2201 2202 static void 2203 net2272_gadget_release(struct device *_dev) 2204 { 2205 struct net2272 *dev = dev_get_drvdata(_dev); 2206 kfree(dev); 2207 } 2208 2209 /*---------------------------------------------------------------------------*/ 2210 2211 static void 2212 net2272_remove(struct net2272 *dev) 2213 { 2214 usb_del_gadget_udc(&dev->gadget); 2215 free_irq(dev->irq, dev); 2216 iounmap(dev->base_addr); 2217 device_remove_file(dev->dev, &dev_attr_registers); 2218 2219 dev_info(dev->dev, "unbind\n"); 2220 } 2221 2222 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq) 2223 { 2224 struct net2272 *ret; 2225 2226 if (!irq) { 2227 dev_dbg(dev, "No IRQ!\n"); 2228 return ERR_PTR(-ENODEV); 2229 } 2230 2231 /* alloc, and start init */ 2232 ret = kzalloc(sizeof(*ret), GFP_KERNEL); 2233 if (!ret) 2234 return ERR_PTR(-ENOMEM); 2235 2236 spin_lock_init(&ret->lock); 2237 ret->irq = irq; 2238 ret->dev = dev; 2239 ret->gadget.ops = &net2272_ops; 2240 ret->gadget.max_speed = USB_SPEED_HIGH; 2241 2242 /* the "gadget" abstracts/virtualizes the controller */ 2243 ret->gadget.name = driver_name; 2244 2245 return ret; 2246 } 2247 2248 static int 2249 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) 2250 { 2251 int ret; 2252 2253 /* See if there... 
*/ 2254 if (net2272_present(dev)) { 2255 dev_warn(dev->dev, "2272 not found!\n"); 2256 ret = -ENODEV; 2257 goto err; 2258 } 2259 2260 net2272_usb_reset(dev); 2261 net2272_usb_reinit(dev); 2262 2263 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev); 2264 if (ret) { 2265 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq); 2266 goto err; 2267 } 2268 2269 dev->chiprev = net2272_read(dev, CHIPREV_2272); 2270 2271 /* done */ 2272 dev_info(dev->dev, "%s\n", driver_desc); 2273 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n", 2274 dev->irq, dev->base_addr, dev->chiprev, 2275 dma_mode_string()); 2276 dev_info(dev->dev, "version: %s\n", driver_vers); 2277 2278 ret = device_create_file(dev->dev, &dev_attr_registers); 2279 if (ret) 2280 goto err_irq; 2281 2282 ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget, 2283 net2272_gadget_release); 2284 if (ret) 2285 goto err_add_udc; 2286 2287 return 0; 2288 2289 err_add_udc: 2290 device_remove_file(dev->dev, &dev_attr_registers); 2291 err_irq: 2292 free_irq(dev->irq, dev); 2293 err: 2294 return ret; 2295 } 2296 2297 #ifdef CONFIG_USB_PCI 2298 2299 /* 2300 * wrap this driver around the specified device, but 2301 * don't respond over USB until a gadget driver binds to us 2302 */ 2303 2304 static int 2305 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) 2306 { 2307 unsigned long resource, len, tmp; 2308 void __iomem *mem_mapped_addr[4]; 2309 int ret, i; 2310 2311 /* 2312 * BAR 0 holds PLX 9054 config registers 2313 * BAR 1 is i/o memory; unused here 2314 * BAR 2 holds EPLD config registers 2315 * BAR 3 holds NET2272 registers 2316 */ 2317 2318 /* Find and map all address spaces */ 2319 for (i = 0; i < 4; ++i) { 2320 if (i == 1) 2321 continue; /* BAR1 unused */ 2322 2323 resource = pci_resource_start(pdev, i); 2324 len = pci_resource_len(pdev, i); 2325 2326 if (!request_mem_region(resource, len, driver_name)) { 2327 dev_dbg(dev->dev, "controller already in use\n"); 2328 ret = -EBUSY; 2329 goto err; 2330 } 2331 2332 mem_mapped_addr[i] = ioremap_nocache(resource, len); 2333 if (mem_mapped_addr[i] == NULL) { 2334 release_mem_region(resource, len); 2335 dev_dbg(dev->dev, "can't map memory\n"); 2336 ret = -EFAULT; 2337 goto err; 2338 } 2339 } 2340 2341 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0]; 2342 dev->rdk1.epld_base_addr = mem_mapped_addr[2]; 2343 dev->base_addr = mem_mapped_addr[3]; 2344 2345 /* Set PLX 9054 bus width (16 bits) */ 2346 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1); 2347 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT, 2348 dev->rdk1.plx9054_base_addr + LBRD1); 2349 2350 /* Enable PLX 9054 Interrupts */ 2351 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) | 2352 (1 << PCI_INTERRUPT_ENABLE) | 2353 (1 << LOCAL_INTERRUPT_INPUT_ENABLE), 2354 dev->rdk1.plx9054_base_addr + INTCSR); 2355 2356 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2357 dev->rdk1.plx9054_base_addr + DMACSR0); 2358 2359 /* reset */ 2360 writeb((1 << EPLD_DMA_ENABLE) | 2361 (1 << DMA_CTL_DACK) | 2362 (1 << DMA_TIMEOUT_ENABLE) | 2363 (1 << USER) | 2364 (0 << MPX_MODE) | 2365 (1 << BUSWIDTH) | 2366 (1 << NET2272_RESET), 2367 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2368 2369 mb(); 2370 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) & 2371 ~(1 << NET2272_RESET), 2372 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2373 udelay(200); 2374 2375 return 0; 2376 2377 err: 2378 while (--i >= 0) { 2379 iounmap(mem_mapped_addr[i]); 2380 release_mem_region(pci_resource_start(pdev, i), 
2381 pci_resource_len(pdev, i));
2382 }
2383
2384 return ret;
2385 }
2386
2387 static int
2388 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2389 {
2390 unsigned long resource, len;
2391 void __iomem *mem_mapped_addr[2];
2392 int ret, i;
2393
2394 /*
2395 * BAR 0 holds FPGA config registers
2396 * BAR 1 holds NET2272 registers
2397 */
2398
2399 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2400 for (i = 0; i < 2; ++i) {
2401 resource = pci_resource_start(pdev, i);
2402 len = pci_resource_len(pdev, i);
2403
2404 if (!request_mem_region(resource, len, driver_name)) {
2405 dev_dbg(dev->dev, "controller already in use\n");
2406 ret = -EBUSY;
2407 goto err;
2408 }
2409
2410 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2411 if (mem_mapped_addr[i] == NULL) {
2412 release_mem_region(resource, len);
2413 dev_dbg(dev->dev, "can't map memory\n");
2414 ret = -EFAULT;
2415 goto err;
2416 }
2417 }
2418
2419 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2420 dev->base_addr = mem_mapped_addr[1];
2421
2422 mb();
2423 /* Set 2272 bus width (16 bits) and reset */
2424 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2425 udelay(200);
2426 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2427 /* Print fpga version number */
2428 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2429 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2430 /* Enable FPGA Interrupts */
2431 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2432
2433 return 0;
2434
2435 err:
2436 while (--i >= 0) {
2437 iounmap(mem_mapped_addr[i]);
2438 release_mem_region(pci_resource_start(pdev, i),
2439 pci_resource_len(pdev, i));
2440 }
2441
2442 return ret;
2443 }
2444
2445 static int
2446 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2447 {
2448 struct net2272 *dev;
2449 int ret;
2450
2451 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2452 if (IS_ERR(dev))
2453 return PTR_ERR(dev);
2454 dev->dev_id = pdev->device;
2455
2456 if (pci_enable_device(pdev) < 0) {
2457 ret = -ENODEV;
2458 goto err_free;
2459 }
2460
2461 pci_set_master(pdev);
2462
2463 switch (pdev->device) {
2464 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2465 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2466 default: BUG();
2467 }
2468 if (ret)
2469 goto err_pci;
2470
2471 ret = net2272_probe_fin(dev, 0);
2472 if (ret)
2473 goto err_pci;
2474
2475 pci_set_drvdata(pdev, dev);
2476
2477 return 0;
2478
2479 err_pci:
2480 pci_disable_device(pdev);
2481 err_free:
2482 kfree(dev);
2483
2484 return ret;
2485 }
2486
2487 static void
2488 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2489 {
2490 int i;
2491
2492 /* disable PLX 9054 interrupts */
2493 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2494 ~(1 << PCI_INTERRUPT_ENABLE),
2495 dev->rdk1.plx9054_base_addr + INTCSR);
2496
2497 /* clean up resources allocated during probe() */
2498 iounmap(dev->rdk1.plx9054_base_addr);
2499 iounmap(dev->rdk1.epld_base_addr);
2500
2501 for (i = 0; i < 4; ++i) {
2502 if (i == 1)
2503 continue; /* BAR1 unused */
2504 release_mem_region(pci_resource_start(pdev, i),
2505 pci_resource_len(pdev, i));
2506 }
2507 }
2508
2509 static void
2510 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2511 {
2512 int i;
2513
2514 /* disable fpga interrupts
2515 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2516 ~(1 << PCI_INTERRUPT_ENABLE),
2517 dev->rdk1.plx9054_base_addr + INTCSR);
2518 */
2519
2520
/* clean up resources allocated during probe() */
2521 iounmap(dev->rdk2.fpga_base_addr);
2522
2523 for (i = 0; i < 2; ++i)
2524 release_mem_region(pci_resource_start(pdev, i),
2525 pci_resource_len(pdev, i));
2526 }
2527
2528 static void
2529 net2272_pci_remove(struct pci_dev *pdev)
2530 {
2531 struct net2272 *dev = pci_get_drvdata(pdev);
2532
2533 net2272_remove(dev);
2534
2535 switch (pdev->device) {
2536 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2537 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2538 default: BUG();
2539 }
2540
2541 pci_disable_device(pdev);
2542
2543 kfree(dev);
2544 }
2545
2546 /* Table of matching PCI IDs */
2547 static struct pci_device_id pci_ids[] = {
2548 { /* RDK 1 card */
2549 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2550 .class_mask = 0,
2551 .vendor = PCI_VENDOR_ID_PLX,
2552 .device = PCI_DEVICE_ID_RDK1,
2553 .subvendor = PCI_ANY_ID,
2554 .subdevice = PCI_ANY_ID,
2555 },
2556 { /* RDK 2 card */
2557 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2558 .class_mask = 0,
2559 .vendor = PCI_VENDOR_ID_PLX,
2560 .device = PCI_DEVICE_ID_RDK2,
2561 .subvendor = PCI_ANY_ID,
2562 .subdevice = PCI_ANY_ID,
2563 },
2564 { }
2565 };
2566 MODULE_DEVICE_TABLE(pci, pci_ids);
2567
2568 static struct pci_driver net2272_pci_driver = {
2569 .name = driver_name,
2570 .id_table = pci_ids,
2571
2572 .probe = net2272_pci_probe,
2573 .remove = net2272_pci_remove,
2574 };
2575
2576 static int net2272_pci_register(void)
2577 {
2578 return pci_register_driver(&net2272_pci_driver);
2579 }
2580
2581 static void net2272_pci_unregister(void)
2582 {
2583 pci_unregister_driver(&net2272_pci_driver);
2584 }
2585
2586 #else
2587 static inline int net2272_pci_register(void) { return 0; }
2588 static inline void net2272_pci_unregister(void) { }
2589 #endif
2590
2591 /*---------------------------------------------------------------------------*/
2592
2593 static int
2594 net2272_plat_probe(struct platform_device *pdev)
2595 {
2596 struct net2272 *dev;
2597 int ret;
2598 unsigned int irqflags;
2599 resource_size_t base, len;
2600 struct resource *iomem, *iomem_bus, *irq_res;
2601
2602 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2603 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2604 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2605 if (!irq_res || !iomem) {
2606 dev_err(&pdev->dev, "must provide irq/base addr\n");
2607 return -EINVAL;
2608 }
2609
2610 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2611 if (IS_ERR(dev))
2612 return PTR_ERR(dev);
2613
2614 irqflags = 0;
2615 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2616 irqflags |= IRQF_TRIGGER_RISING;
2617 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2618 irqflags |= IRQF_TRIGGER_FALLING;
2619 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2620 irqflags |= IRQF_TRIGGER_HIGH;
2621 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2622 irqflags |= IRQF_TRIGGER_LOW;
2623
2624 base = iomem->start;
2625 len = resource_size(iomem);
2626 if (iomem_bus)
2627 dev->base_shift = iomem_bus->start;
2628
2629 if (!request_mem_region(base, len, driver_name)) {
2630 dev_dbg(dev->dev, "can't request memory region\n");
2631 ret = -EBUSY;
2632 goto err;
2633 }
2634 dev->base_addr = ioremap_nocache(base, len);
2635 if (!dev->base_addr) {
2636 dev_dbg(dev->dev, "can't map memory\n");
2637 ret = -EFAULT;
2638 goto err_req;
2639 }
2640
2641 ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2642 if (ret)
2643 goto err_io;
2644
2645 platform_set_drvdata(pdev, dev);
2646
dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n", 2647 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no "); 2648 2649 return 0; 2650 2651 err_io: 2652 iounmap(dev->base_addr); 2653 err_req: 2654 release_mem_region(base, len); 2655 err: 2656 return ret; 2657 } 2658 2659 static int 2660 net2272_plat_remove(struct platform_device *pdev) 2661 { 2662 struct net2272 *dev = platform_get_drvdata(pdev); 2663 2664 net2272_remove(dev); 2665 2666 release_mem_region(pdev->resource[0].start, 2667 resource_size(&pdev->resource[0])); 2668 2669 kfree(dev); 2670 2671 return 0; 2672 } 2673 2674 static struct platform_driver net2272_plat_driver = { 2675 .probe = net2272_plat_probe, 2676 .remove = net2272_plat_remove, 2677 .driver = { 2678 .name = driver_name, 2679 }, 2680 /* FIXME .suspend, .resume */ 2681 }; 2682 MODULE_ALIAS("platform:net2272"); 2683 2684 static int __init net2272_init(void) 2685 { 2686 int ret; 2687 2688 ret = net2272_pci_register(); 2689 if (ret) 2690 return ret; 2691 ret = platform_driver_register(&net2272_plat_driver); 2692 if (ret) 2693 goto err_pci; 2694 return ret; 2695 2696 err_pci: 2697 net2272_pci_unregister(); 2698 return ret; 2699 } 2700 module_init(net2272_init); 2701 2702 static void __exit net2272_cleanup(void) 2703 { 2704 net2272_pci_unregister(); 2705 platform_driver_unregister(&net2272_plat_driver); 2706 } 2707 module_exit(net2272_cleanup); 2708 2709 MODULE_DESCRIPTION(DRIVER_DESC); 2710 MODULE_AUTHOR("PLX Technology, Inc."); 2711 MODULE_LICENSE("GPL"); 2712
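/*
 * Example (editor's sketch, not part of the driver): a board file could
 * describe the controller to net2272_plat_probe() with resources like the
 * ones below.  The probe routine expects an IORESOURCE_MEM window, an
 * IORESOURCE_IRQ entry (whose trigger flags select the IRQF_TRIGGER_*
 * mode), and an optional IORESOURCE_BUS entry whose start is stored in
 * dev->base_shift.  The base address, window length, and interrupt number
 * shown here are placeholders and must match the actual NET2272 wiring.
 */
#if 0
static struct resource board_net2272_resources[] = {
	{
		.start	= 0x20300000,		/* placeholder register base */
		.end	= 0x20300000 + 0x7f,	/* placeholder window length */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 64,			/* placeholder interrupt line */
		.end	= 64,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
	{
		.start	= 1,			/* optional: local bus address shift */
		.end	= 1,
		.flags	= IORESOURCE_BUS,
	},
};

static struct platform_device board_net2272_device = {
	.name		= "net2272",	/* matches driver_name for binding */
	.id		= -1,
	.resource	= board_net2272_resources,
	.num_resources	= ARRAY_SIZE(board_net2272_resources),
};

/* registered from board init code, e.g. platform_device_register(&board_net2272_device); */
#endif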