1 /* 2 * Driver for PLX NET2272 USB device controller 3 * 4 * Copyright (C) 2005-2006 PLX Technology, Inc. 5 * Copyright (C) 2006-2011 Analog Devices, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 */ 21 22 #include <linux/delay.h> 23 #include <linux/device.h> 24 #include <linux/errno.h> 25 #include <linux/gpio.h> 26 #include <linux/init.h> 27 #include <linux/interrupt.h> 28 #include <linux/io.h> 29 #include <linux/ioport.h> 30 #include <linux/kernel.h> 31 #include <linux/list.h> 32 #include <linux/module.h> 33 #include <linux/moduleparam.h> 34 #include <linux/pci.h> 35 #include <linux/platform_device.h> 36 #include <linux/prefetch.h> 37 #include <linux/sched.h> 38 #include <linux/slab.h> 39 #include <linux/timer.h> 40 #include <linux/usb.h> 41 #include <linux/usb/ch9.h> 42 #include <linux/usb/gadget.h> 43 44 #include <asm/byteorder.h> 45 #include <asm/unaligned.h> 46 47 #include "net2272.h" 48 49 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller" 50 51 static const char driver_name[] = "net2272"; 52 static const char driver_vers[] = "2006 October 17/mainline"; 53 static const char driver_desc[] = DRIVER_DESC; 54 55 static const char ep0name[] = "ep0"; 56 static const char * const ep_name[] = { 57 ep0name, 58 "ep-a", "ep-b", "ep-c", 59 }; 60 61 #ifdef CONFIG_USB_NET2272_DMA 62 /* 63 * use_dma: the NET2272 can use an external DMA controller. 64 * Note that since there is no generic DMA api, some functions, 65 * notably request_dma, start_dma, and cancel_dma will need to be 66 * modified for your platform's particular dma controller. 67 * 68 * If use_dma is disabled, pio will be used instead. 69 */ 70 static bool use_dma = 0; 71 module_param(use_dma, bool, 0644); 72 73 /* 74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b) 75 * The NET2272 can only use dma for a single endpoint at a time. 76 * At some point this could be modified to allow either endpoint 77 * to take control of dma as it becomes available. 78 * 79 * Note that DMA should not be used on OUT endpoints unless it can 80 * be guaranteed that no short packets will arrive on an IN endpoint 81 * while the DMA operation is pending. 
Otherwise the OUT DMA will 82 * terminate prematurely (See NET2272 Errata 630-0213-0101) 83 */ 84 static ushort dma_ep = 1; 85 module_param(dma_ep, ushort, 0644); 86 87 /* 88 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition): 89 * mode 0 == Slow DREQ mode 90 * mode 1 == Fast DREQ mode 91 * mode 2 == Burst mode 92 */ 93 static ushort dma_mode = 2; 94 module_param(dma_mode, ushort, 0644); 95 #else 96 #define use_dma 0 97 #define dma_ep 1 98 #define dma_mode 2 99 #endif 100 101 /* 102 * fifo_mode: net2272 buffer configuration: 103 * mode 0 == ep-{a,b,c} 512db each 104 * mode 1 == ep-a 1k, ep-{b,c} 512db 105 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db 106 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db 107 */ 108 static ushort fifo_mode = 0; 109 module_param(fifo_mode, ushort, 0644); 110 111 /* 112 * enable_suspend: When enabled, the driver will respond to 113 * USB suspend requests by powering down the NET2272. Otherwise, 114 * USB suspend requests will be ignored. This is acceptable for 115 * self-powered devices. For bus-powered devices set this to 1. 116 */ 117 static ushort enable_suspend = 0; 118 module_param(enable_suspend, ushort, 0644); 119 120 static void assert_out_naking(struct net2272_ep *ep, const char *where) 121 { 122 u8 tmp; 123 124 #ifndef DEBUG 125 return; 126 #endif 127 128 tmp = net2272_ep_read(ep, EP_STAT0); 129 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) { 130 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n", 131 ep->ep.name, where, tmp); 132 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 133 } 134 } 135 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__) 136 137 static void stop_out_naking(struct net2272_ep *ep) 138 { 139 u8 tmp = net2272_ep_read(ep, EP_STAT0); 140 141 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0) 142 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 143 } 144 145 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ?
"in" : "out") 146 147 static char *type_string(u8 bmAttributes) 148 { 149 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 150 case USB_ENDPOINT_XFER_BULK: return "bulk"; 151 case USB_ENDPOINT_XFER_ISOC: return "iso"; 152 case USB_ENDPOINT_XFER_INT: return "intr"; 153 default: return "control"; 154 } 155 } 156 157 static char *buf_state_string(unsigned state) 158 { 159 switch (state) { 160 case BUFF_FREE: return "free"; 161 case BUFF_VALID: return "valid"; 162 case BUFF_LCL: return "local"; 163 case BUFF_USB: return "usb"; 164 default: return "unknown"; 165 } 166 } 167 168 static char *dma_mode_string(void) 169 { 170 if (!use_dma) 171 return "PIO"; 172 switch (dma_mode) { 173 case 0: return "SLOW DREQ"; 174 case 1: return "FAST DREQ"; 175 case 2: return "BURST"; 176 default: return "invalid"; 177 } 178 } 179 180 static void net2272_dequeue_all(struct net2272_ep *); 181 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *); 182 static int net2272_fifo_status(struct usb_ep *); 183 184 static struct usb_ep_ops net2272_ep_ops; 185 186 /*---------------------------------------------------------------------------*/ 187 188 static int 189 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 190 { 191 struct net2272 *dev; 192 struct net2272_ep *ep; 193 u32 max; 194 u8 tmp; 195 unsigned long flags; 196 197 ep = container_of(_ep, struct net2272_ep, ep); 198 if (!_ep || !desc || ep->desc || _ep->name == ep0name 199 || desc->bDescriptorType != USB_DT_ENDPOINT) 200 return -EINVAL; 201 dev = ep->dev; 202 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 203 return -ESHUTDOWN; 204 205 max = usb_endpoint_maxp(desc) & 0x1fff; 206 207 spin_lock_irqsave(&dev->lock, flags); 208 _ep->maxpacket = max & 0x7fff; 209 ep->desc = desc; 210 211 /* net2272_ep_reset() has already been called */ 212 ep->stopped = 0; 213 ep->wedged = 0; 214 215 /* set speed-dependent max packet */ 216 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff); 217 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8); 218 219 /* set type, direction, address; reset fifo counters */ 220 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 221 tmp = usb_endpoint_type(desc); 222 if (usb_endpoint_xfer_bulk(desc)) { 223 /* catch some particularly blatant driver bugs */ 224 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) || 225 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { 226 spin_unlock_irqrestore(&dev->lock, flags); 227 return -ERANGE; 228 } 229 } 230 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 
1 : 0; 231 tmp <<= ENDPOINT_TYPE; 232 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER); 233 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION; 234 tmp |= (1 << ENDPOINT_ENABLE); 235 236 /* for OUT transfers, block the rx fifo until a read is posted */ 237 ep->is_in = usb_endpoint_dir_in(desc); 238 if (!ep->is_in) 239 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 240 241 net2272_ep_write(ep, EP_CFG, tmp); 242 243 /* enable irqs */ 244 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0); 245 net2272_write(dev, IRQENB0, tmp); 246 247 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 248 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 249 | net2272_ep_read(ep, EP_IRQENB); 250 net2272_ep_write(ep, EP_IRQENB, tmp); 251 252 tmp = desc->bEndpointAddress; 253 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n", 254 _ep->name, tmp & 0x0f, PIPEDIR(tmp), 255 type_string(desc->bmAttributes), max, 256 net2272_ep_read(ep, EP_CFG)); 257 258 spin_unlock_irqrestore(&dev->lock, flags); 259 return 0; 260 } 261 262 static void net2272_ep_reset(struct net2272_ep *ep) 263 { 264 u8 tmp; 265 266 ep->desc = NULL; 267 INIT_LIST_HEAD(&ep->queue); 268 269 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 270 ep->ep.ops = &net2272_ep_ops; 271 272 /* disable irqs, endpoint */ 273 net2272_ep_write(ep, EP_IRQENB, 0); 274 275 /* init to our chosen defaults, notably so that we NAK OUT 276 * packets until the driver queues a read. 277 */ 278 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS); 279 net2272_ep_write(ep, EP_RSPSET, tmp); 280 281 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE); 282 if (ep->num != 0) 283 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT); 284 285 net2272_ep_write(ep, EP_RSPCLR, tmp); 286 287 /* scrub most status bits, and flush any fifo state */ 288 net2272_ep_write(ep, EP_STAT0, 289 (1 << DATA_IN_TOKEN_INTERRUPT) 290 | (1 << DATA_OUT_TOKEN_INTERRUPT) 291 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) 292 | (1 << DATA_PACKET_RECEIVED_INTERRUPT) 293 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); 294 295 net2272_ep_write(ep, EP_STAT1, 296 (1 << TIMEOUT) 297 | (1 << USB_OUT_ACK_SENT) 298 | (1 << USB_OUT_NAK_SENT) 299 | (1 << USB_IN_ACK_RCVD) 300 | (1 << USB_IN_NAK_SENT) 301 | (1 << USB_STALL_SENT) 302 | (1 << LOCAL_OUT_ZLP) 303 | (1 << BUFFER_FLUSH)); 304 305 /* fifo size is handled separately */ 306 } 307 308 static int net2272_disable(struct usb_ep *_ep) 309 { 310 struct net2272_ep *ep; 311 unsigned long flags; 312 313 ep = container_of(_ep, struct net2272_ep, ep); 314 if (!_ep || !ep->desc || _ep->name == ep0name) 315 return -EINVAL; 316 317 spin_lock_irqsave(&ep->dev->lock, flags); 318 net2272_dequeue_all(ep); 319 net2272_ep_reset(ep); 320 321 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name); 322 323 spin_unlock_irqrestore(&ep->dev->lock, flags); 324 return 0; 325 } 326 327 /*---------------------------------------------------------------------------*/ 328 329 static struct usb_request * 330 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 331 { 332 struct net2272_request *req; 333 334 if (!_ep) 335 return NULL; 336 337 req = kzalloc(sizeof(*req), gfp_flags); 338 if (!req) 339 return NULL; 340 341 INIT_LIST_HEAD(&req->queue); 342 343 return &req->req; 344 } 345 346 static void 347 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req) 348 { 349 struct net2272_request *req; 350 351 if (!_ep || !_req) 352 return; 353 354 req = container_of(_req, struct net2272_request, req); 355 WARN_ON(!list_empty(&req->queue)); 356
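/* the request must already have been dequeued before it is freed */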
kfree(req); 357 } 358 359 static void 360 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) 361 { 362 struct net2272 *dev; 363 unsigned stopped = ep->stopped; 364 365 if (ep->num == 0) { 366 if (ep->dev->protocol_stall) { 367 ep->stopped = 1; 368 set_halt(ep); 369 } 370 allow_status(ep); 371 } 372 373 list_del_init(&req->queue); 374 375 if (req->req.status == -EINPROGRESS) 376 req->req.status = status; 377 else 378 status = req->req.status; 379 380 dev = ep->dev; 381 if (use_dma && ep->dma) 382 usb_gadget_unmap_request(&dev->gadget, &req->req, 383 ep->is_in); 384 385 if (status && status != -ESHUTDOWN) 386 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n", 387 ep->ep.name, &req->req, status, 388 req->req.actual, req->req.length, req->req.buf); 389 390 /* don't modify queue heads during completion callback */ 391 ep->stopped = 1; 392 spin_unlock(&dev->lock); 393 usb_gadget_giveback_request(&ep->ep, &req->req); 394 spin_lock(&dev->lock); 395 ep->stopped = stopped; 396 } 397 398 static int 399 net2272_write_packet(struct net2272_ep *ep, u8 *buf, 400 struct net2272_request *req, unsigned max) 401 { 402 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); 403 u16 *bufp; 404 unsigned length, count; 405 u8 tmp; 406 407 length = min(req->req.length - req->req.actual, max); 408 req->req.actual += length; 409 410 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", 411 ep->ep.name, req, max, length, 412 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); 413 414 count = length; 415 bufp = (u16 *)buf; 416 417 while (likely(count >= 2)) { 418 /* no byte-swap required; chip endian set during init */ 419 writew(*bufp++, ep_data); 420 count -= 2; 421 } 422 buf = (u8 *)bufp; 423 424 /* write final byte by placing the NET2272 into 8-bit mode */ 425 if (unlikely(count)) { 426 tmp = net2272_read(ep->dev, LOCCTL); 427 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH)); 428 writeb(*buf, ep_data); 429 net2272_write(ep->dev, LOCCTL, tmp); 430 } 431 return length; 432 } 433 434 /* returns: 0: still running, 1: completed, negative: errno */ 435 static int 436 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) 437 { 438 u8 *buf; 439 unsigned count, max; 440 int status; 441 442 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n", 443 ep->ep.name, req->req.actual, req->req.length); 444 445 /* 446 * Keep loading the endpoint until the final packet is loaded, 447 * or the endpoint buffer is full. 
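* A short final packet (or zero-length packet) is validated explicitly by writing a byte count of zero.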
448 */ 449 top: 450 /* 451 * Clear interrupt status 452 * - Packet Transmitted interrupt will become set again when the 453 * host successfully takes another packet 454 */ 455 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); 456 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) { 457 buf = req->req.buf + req->req.actual; 458 prefetch(buf); 459 460 /* force pagesel */ 461 net2272_ep_read(ep, EP_STAT0); 462 463 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) | 464 (net2272_ep_read(ep, EP_AVAIL0)); 465 466 if (max < ep->ep.maxpacket) 467 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) 468 | (net2272_ep_read(ep, EP_AVAIL0)); 469 470 count = net2272_write_packet(ep, buf, req, max); 471 /* see if we are done */ 472 if (req->req.length == req->req.actual) { 473 /* validate short or zlp packet */ 474 if (count < ep->ep.maxpacket) 475 set_fifo_bytecount(ep, 0); 476 net2272_done(ep, req, 0); 477 478 if (!list_empty(&ep->queue)) { 479 req = list_entry(ep->queue.next, 480 struct net2272_request, 481 queue); 482 status = net2272_kick_dma(ep, req); 483 484 if (status < 0) 485 if ((net2272_ep_read(ep, EP_STAT0) 486 & (1 << BUFFER_EMPTY))) 487 goto top; 488 } 489 return 1; 490 } 491 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); 492 } 493 return 0; 494 } 495 496 static void 497 net2272_out_flush(struct net2272_ep *ep) 498 { 499 ASSERT_OUT_NAKING(ep); 500 501 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT) 502 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)); 503 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 504 } 505 506 static int 507 net2272_read_packet(struct net2272_ep *ep, u8 *buf, 508 struct net2272_request *req, unsigned avail) 509 { 510 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); 511 unsigned is_short; 512 u16 *bufp; 513 514 req->req.actual += avail; 515 516 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", 517 ep->ep.name, req, avail, 518 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); 519 520 is_short = (avail < ep->ep.maxpacket); 521 522 if (unlikely(avail == 0)) { 523 /* remove any zlp from the buffer */ 524 (void)readw(ep_data); 525 return is_short; 526 } 527 528 /* Ensure we get the final byte */ 529 if (unlikely(avail % 2)) 530 avail++; 531 bufp = (u16 *)buf; 532 533 do { 534 *bufp++ = readw(ep_data); 535 avail -= 2; 536 } while (avail); 537 538 /* 539 * To avoid false endpoint available race condition must read 540 * ep stat0 twice in the case of a short transfer 541 */ 542 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) 543 net2272_ep_read(ep, EP_STAT0); 544 545 return is_short; 546 } 547 548 static int 549 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) 550 { 551 u8 *buf; 552 unsigned is_short; 553 int count; 554 int tmp; 555 int cleanup = 0; 556 int status = -1; 557 558 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n", 559 ep->ep.name, req->req.actual, req->req.length); 560 561 top: 562 do { 563 buf = req->req.buf + req->req.actual; 564 prefetchw(buf); 565 566 count = (net2272_ep_read(ep, EP_AVAIL1) << 8) 567 | net2272_ep_read(ep, EP_AVAIL0); 568 569 net2272_ep_write(ep, EP_STAT0, 570 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | 571 (1 << DATA_PACKET_RECEIVED_INTERRUPT)); 572 573 tmp = req->req.length - req->req.actual; 574 575 if (count > tmp) { 576 if ((tmp % ep->ep.maxpacket) != 0) { 577 dev_err(ep->dev->dev, 578 "%s out fifo %d bytes, expected %d\n", 579 ep->ep.name, count, tmp); 580 cleanup = 1; 581 } 
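/* never read more bytes than the request has room for */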
582 count = (tmp > 0) ? tmp : 0; 583 } 584 585 is_short = net2272_read_packet(ep, buf, req, count); 586 587 /* completion */ 588 if (unlikely(cleanup || is_short || 589 ((req->req.actual == req->req.length) 590 && !req->req.zero))) { 591 592 if (cleanup) { 593 net2272_out_flush(ep); 594 net2272_done(ep, req, -EOVERFLOW); 595 } else 596 net2272_done(ep, req, 0); 597 598 /* re-initialize endpoint transfer registers 599 * otherwise they may result in erroneous pre-validation 600 * for subsequent control reads 601 */ 602 if (unlikely(ep->num == 0)) { 603 net2272_ep_write(ep, EP_TRANSFER2, 0); 604 net2272_ep_write(ep, EP_TRANSFER1, 0); 605 net2272_ep_write(ep, EP_TRANSFER0, 0); 606 } 607 608 if (!list_empty(&ep->queue)) { 609 req = list_entry(ep->queue.next, 610 struct net2272_request, queue); 611 status = net2272_kick_dma(ep, req); 612 if ((status < 0) && 613 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))) 614 goto top; 615 } 616 return 1; 617 } 618 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))); 619 620 return 0; 621 } 622 623 static void 624 net2272_pio_advance(struct net2272_ep *ep) 625 { 626 struct net2272_request *req; 627 628 if (unlikely(list_empty(&ep->queue))) 629 return; 630 631 req = list_entry(ep->queue.next, struct net2272_request, queue); 632 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); 633 } 634 635 /* returns 0 on success, else negative errno */ 636 static int 637 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf, 638 unsigned len, unsigned dir) 639 { 640 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n", 641 ep, buf, len, dir); 642 643 /* The NET2272 only supports a single dma channel */ 644 if (dev->dma_busy) 645 return -EBUSY; 646 /* 647 * EP_TRANSFER (used to determine the number of bytes received 648 * in an OUT transfer) is 24 bits wide; don't ask for more than that. 649 */ 650 if ((dir == 1) && (len > 0x1000000)) 651 return -EINVAL; 652 653 dev->dma_busy = 1; 654 655 /* initialize platform's dma */ 656 #ifdef CONFIG_PCI 657 /* NET2272 addr, buffer addr, length, etc. 
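* for the RDK1 board this means programming channel 0 of the PLX 9054 PCI bridge with the buffer address, transfer length, and direction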
*/ 658 switch (dev->dev_id) { 659 case PCI_DEVICE_ID_RDK1: 660 /* Setup PLX 9054 DMA mode */ 661 writel((1 << LOCAL_BUS_WIDTH) | 662 (1 << TA_READY_INPUT_ENABLE) | 663 (0 << LOCAL_BURST_ENABLE) | 664 (1 << DONE_INTERRUPT_ENABLE) | 665 (1 << LOCAL_ADDRESSING_MODE) | 666 (1 << DEMAND_MODE) | 667 (1 << DMA_EOT_ENABLE) | 668 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) | 669 (1 << DMA_CHANNEL_INTERRUPT_SELECT), 670 dev->rdk1.plx9054_base_addr + DMAMODE0); 671 672 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0); 673 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0); 674 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0); 675 writel((dir << DIRECTION_OF_TRANSFER) | 676 (1 << INTERRUPT_AFTER_TERMINAL_COUNT), 677 dev->rdk1.plx9054_base_addr + DMADPR0); 678 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) | 679 readl(dev->rdk1.plx9054_base_addr + INTCSR), 680 dev->rdk1.plx9054_base_addr + INTCSR); 681 682 break; 683 } 684 #endif 685 686 net2272_write(dev, DMAREQ, 687 (0 << DMA_BUFFER_VALID) | 688 (1 << DMA_REQUEST_ENABLE) | 689 (1 << DMA_CONTROL_DACK) | 690 (dev->dma_eot_polarity << EOT_POLARITY) | 691 (dev->dma_dack_polarity << DACK_POLARITY) | 692 (dev->dma_dreq_polarity << DREQ_POLARITY) | 693 ((ep >> 1) << DMA_ENDPOINT_SELECT)); 694 695 (void) net2272_read(dev, SCRATCH); 696 697 return 0; 698 } 699 700 static void 701 net2272_start_dma(struct net2272 *dev) 702 { 703 /* start platform's dma controller */ 704 #ifdef CONFIG_PCI 705 switch (dev->dev_id) { 706 case PCI_DEVICE_ID_RDK1: 707 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START), 708 dev->rdk1.plx9054_base_addr + DMACSR0); 709 break; 710 } 711 #endif 712 } 713 714 /* returns 0 on success, else negative errno */ 715 static int 716 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) 717 { 718 unsigned size; 719 u8 tmp; 720 721 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma) 722 return -EINVAL; 723 724 /* don't use dma for odd-length transfers 725 * otherwise, we'd need to deal with the last byte with pio 726 */ 727 if (req->req.length & 1) 728 return -EINVAL; 729 730 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n", 731 ep->ep.name, req, (unsigned long long) req->req.dma); 732 733 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 734 735 /* The NET2272 can only use DMA on one endpoint at a time */ 736 if (ep->dev->dma_busy) 737 return -EBUSY; 738 739 /* Make sure we only DMA an even number of bytes (we'll use 740 * pio to complete the transfer) 741 */ 742 size = req->req.length; 743 size &= ~1; 744 745 /* device-to-host transfer */ 746 if (ep->is_in) { 747 /* initialize platform's dma controller */ 748 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) 749 /* unable to obtain DMA channel; return error and use pio mode */ 750 return -EBUSY; 751 req->req.actual += size; 752 753 /* host-to-device transfer */ 754 } else { 755 tmp = net2272_ep_read(ep, EP_STAT0); 756 757 /* initialize platform's dma controller */ 758 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) 759 /* unable to obtain DMA channel; return error and use pio mode */ 760 return -EBUSY; 761 762 if (!(tmp & (1 << BUFFER_EMPTY))) 763 ep->not_empty = 1; 764 else 765 ep->not_empty = 0; 766 767 768 /* allow the endpoint's buffer to fill */ 769 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 770 771 /* this transfer completed and data's already in the fifo 772 * return error so pio gets used. 
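* (the caller then falls back to pio and drains the fifo directly)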
773 */ 774 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 775 776 /* deassert dreq */ 777 net2272_write(ep->dev, DMAREQ, 778 (0 << DMA_BUFFER_VALID) | 779 (0 << DMA_REQUEST_ENABLE) | 780 (1 << DMA_CONTROL_DACK) | 781 (ep->dev->dma_eot_polarity << EOT_POLARITY) | 782 (ep->dev->dma_dack_polarity << DACK_POLARITY) | 783 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) | 784 ((ep->num >> 1) << DMA_ENDPOINT_SELECT)); 785 786 return -EBUSY; 787 } 788 } 789 790 /* Don't use per-packet interrupts: use dma interrupts only */ 791 net2272_ep_write(ep, EP_IRQENB, 0); 792 793 net2272_start_dma(ep->dev); 794 795 return 0; 796 } 797 798 static void net2272_cancel_dma(struct net2272 *dev) 799 { 800 #ifdef CONFIG_PCI 801 switch (dev->dev_id) { 802 case PCI_DEVICE_ID_RDK1: 803 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0); 804 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0); 805 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) & 806 (1 << CHANNEL_DONE))) 807 continue; /* wait for dma to stabilize */ 808 809 /* dma abort generates an interrupt */ 810 writeb(1 << CHANNEL_CLEAR_INTERRUPT, 811 dev->rdk1.plx9054_base_addr + DMACSR0); 812 break; 813 } 814 #endif 815 816 dev->dma_busy = 0; 817 } 818 819 /*---------------------------------------------------------------------------*/ 820 821 static int 822 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 823 { 824 struct net2272_request *req; 825 struct net2272_ep *ep; 826 struct net2272 *dev; 827 unsigned long flags; 828 int status = -1; 829 u8 s; 830 831 req = container_of(_req, struct net2272_request, req); 832 if (!_req || !_req->complete || !_req->buf 833 || !list_empty(&req->queue)) 834 return -EINVAL; 835 ep = container_of(_ep, struct net2272_ep, ep); 836 if (!_ep || (!ep->desc && ep->num != 0)) 837 return -EINVAL; 838 dev = ep->dev; 839 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 840 return -ESHUTDOWN; 841 842 /* set up dma mapping in case the caller didn't */ 843 if (use_dma && ep->dma) { 844 status = usb_gadget_map_request(&dev->gadget, _req, 845 ep->is_in); 846 if (status) 847 return status; 848 } 849 850 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n", 851 _ep->name, _req, _req->length, _req->buf, 852 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero"); 853 854 spin_lock_irqsave(&dev->lock, flags); 855 856 _req->status = -EINPROGRESS; 857 _req->actual = 0; 858 859 /* kickstart this i/o queue? */ 860 if (list_empty(&ep->queue) && !ep->stopped) { 861 /* maybe there's no control data, just status ack */ 862 if (ep->num == 0 && _req->length == 0) { 863 net2272_done(ep, req, 0); 864 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name); 865 goto done; 866 } 867 868 /* Return zlp, don't let it block subsequent packets */ 869 s = net2272_ep_read(ep, EP_STAT0); 870 if (s & (1 << BUFFER_EMPTY)) { 871 /* Buffer is empty; check for a blocking zlp, handle it */ 872 if ((s & (1 << NAK_OUT_PACKETS)) && 873 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) { 874 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n"); 875 /* 876 * Request is going to terminate with a short packet ... 877 * hope the client is ready for it!
878 */ 879 status = net2272_read_fifo(ep, req); 880 /* clear short packet naking */ 881 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS)); 882 goto done; 883 } 884 } 885 886 /* try dma first */ 887 status = net2272_kick_dma(ep, req); 888 889 if (status < 0) { 890 /* dma failed (most likely in use by another endpoint) 891 * fallback to pio 892 */ 893 status = 0; 894 895 if (ep->is_in) 896 status = net2272_write_fifo(ep, req); 897 else { 898 s = net2272_ep_read(ep, EP_STAT0); 899 if ((s & (1 << BUFFER_EMPTY)) == 0) 900 status = net2272_read_fifo(ep, req); 901 } 902 903 if (unlikely(status != 0)) { 904 if (status > 0) 905 status = 0; 906 req = NULL; 907 } 908 } 909 } 910 if (likely(req)) 911 list_add_tail(&req->queue, &ep->queue); 912 913 if (likely(!list_empty(&ep->queue))) 914 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 915 done: 916 spin_unlock_irqrestore(&dev->lock, flags); 917 918 return 0; 919 } 920 921 /* dequeue ALL requests */ 922 static void 923 net2272_dequeue_all(struct net2272_ep *ep) 924 { 925 struct net2272_request *req; 926 927 /* called with spinlock held */ 928 ep->stopped = 1; 929 930 while (!list_empty(&ep->queue)) { 931 req = list_entry(ep->queue.next, 932 struct net2272_request, 933 queue); 934 net2272_done(ep, req, -ESHUTDOWN); 935 } 936 } 937 938 /* dequeue JUST ONE request */ 939 static int 940 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) 941 { 942 struct net2272_ep *ep; 943 struct net2272_request *req; 944 unsigned long flags; 945 int stopped; 946 947 ep = container_of(_ep, struct net2272_ep, ep); 948 if (!_ep || (!ep->desc && ep->num != 0) || !_req) 949 return -EINVAL; 950 951 spin_lock_irqsave(&ep->dev->lock, flags); 952 stopped = ep->stopped; 953 ep->stopped = 1; 954 955 /* make sure it's still queued on this endpoint */ 956 list_for_each_entry(req, &ep->queue, queue) { 957 if (&req->req == _req) 958 break; 959 } 960 if (&req->req != _req) { 961 spin_unlock_irqrestore(&ep->dev->lock, flags); 962 return -EINVAL; 963 } 964 965 /* queue head may be partially complete */ 966 if (ep->queue.next == &req->queue) { 967 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name); 968 net2272_done(ep, req, -ECONNRESET); 969 } 970 req = NULL; 971 ep->stopped = stopped; 972 973 spin_unlock_irqrestore(&ep->dev->lock, flags); 974 return 0; 975 } 976 977 /*---------------------------------------------------------------------------*/ 978 979 static int 980 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) 981 { 982 struct net2272_ep *ep; 983 unsigned long flags; 984 int ret = 0; 985 986 ep = container_of(_ep, struct net2272_ep, ep); 987 if (!_ep || (!ep->desc && ep->num != 0)) 988 return -EINVAL; 989 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 990 return -ESHUTDOWN; 991 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc)) 992 return -EINVAL; 993 994 spin_lock_irqsave(&ep->dev->lock, flags); 995 if (!list_empty(&ep->queue)) 996 ret = -EAGAIN; 997 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) 998 ret = -EAGAIN; 999 else { 1000 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name, 1001 value ? "set" : "clear", 1002 wedged ? 
"wedge" : "halt"); 1003 /* set/clear */ 1004 if (value) { 1005 if (ep->num == 0) 1006 ep->dev->protocol_stall = 1; 1007 else 1008 set_halt(ep); 1009 if (wedged) 1010 ep->wedged = 1; 1011 } else { 1012 clear_halt(ep); 1013 ep->wedged = 0; 1014 } 1015 } 1016 spin_unlock_irqrestore(&ep->dev->lock, flags); 1017 1018 return ret; 1019 } 1020 1021 static int 1022 net2272_set_halt(struct usb_ep *_ep, int value) 1023 { 1024 return net2272_set_halt_and_wedge(_ep, value, 0); 1025 } 1026 1027 static int 1028 net2272_set_wedge(struct usb_ep *_ep) 1029 { 1030 if (!_ep || _ep->name == ep0name) 1031 return -EINVAL; 1032 return net2272_set_halt_and_wedge(_ep, 1, 1); 1033 } 1034 1035 static int 1036 net2272_fifo_status(struct usb_ep *_ep) 1037 { 1038 struct net2272_ep *ep; 1039 u16 avail; 1040 1041 ep = container_of(_ep, struct net2272_ep, ep); 1042 if (!_ep || (!ep->desc && ep->num != 0)) 1043 return -ENODEV; 1044 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1045 return -ESHUTDOWN; 1046 1047 avail = net2272_ep_read(ep, EP_AVAIL1) << 8; 1048 avail |= net2272_ep_read(ep, EP_AVAIL0); 1049 if (avail > ep->fifo_size) 1050 return -EOVERFLOW; 1051 if (ep->is_in) 1052 avail = ep->fifo_size - avail; 1053 return avail; 1054 } 1055 1056 static void 1057 net2272_fifo_flush(struct usb_ep *_ep) 1058 { 1059 struct net2272_ep *ep; 1060 1061 ep = container_of(_ep, struct net2272_ep, ep); 1062 if (!_ep || (!ep->desc && ep->num != 0)) 1063 return; 1064 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1065 return; 1066 1067 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 1068 } 1069 1070 static struct usb_ep_ops net2272_ep_ops = { 1071 .enable = net2272_enable, 1072 .disable = net2272_disable, 1073 1074 .alloc_request = net2272_alloc_request, 1075 .free_request = net2272_free_request, 1076 1077 .queue = net2272_queue, 1078 .dequeue = net2272_dequeue, 1079 1080 .set_halt = net2272_set_halt, 1081 .set_wedge = net2272_set_wedge, 1082 .fifo_status = net2272_fifo_status, 1083 .fifo_flush = net2272_fifo_flush, 1084 }; 1085 1086 /*---------------------------------------------------------------------------*/ 1087 1088 static int 1089 net2272_get_frame(struct usb_gadget *_gadget) 1090 { 1091 struct net2272 *dev; 1092 unsigned long flags; 1093 u16 ret; 1094 1095 if (!_gadget) 1096 return -ENODEV; 1097 dev = container_of(_gadget, struct net2272, gadget); 1098 spin_lock_irqsave(&dev->lock, flags); 1099 1100 ret = net2272_read(dev, FRAME1) << 8; 1101 ret |= net2272_read(dev, FRAME0); 1102 1103 spin_unlock_irqrestore(&dev->lock, flags); 1104 return ret; 1105 } 1106 1107 static int 1108 net2272_wakeup(struct usb_gadget *_gadget) 1109 { 1110 struct net2272 *dev; 1111 u8 tmp; 1112 unsigned long flags; 1113 1114 if (!_gadget) 1115 return 0; 1116 dev = container_of(_gadget, struct net2272, gadget); 1117 1118 spin_lock_irqsave(&dev->lock, flags); 1119 tmp = net2272_read(dev, USBCTL0); 1120 if (tmp & (1 << IO_WAKEUP_ENABLE)) 1121 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME)); 1122 1123 spin_unlock_irqrestore(&dev->lock, flags); 1124 1125 return 0; 1126 } 1127 1128 static int 1129 net2272_set_selfpowered(struct usb_gadget *_gadget, int value) 1130 { 1131 if (!_gadget) 1132 return -ENODEV; 1133 1134 _gadget->is_selfpowered = (value != 0); 1135 1136 return 0; 1137 } 1138 1139 static int 1140 net2272_pullup(struct usb_gadget *_gadget, int is_on) 1141 { 1142 struct net2272 *dev; 1143 u8 tmp; 1144 unsigned long flags; 1145 1146 if (!_gadget) 1147 return -ENODEV; 1148 dev = container_of(_gadget, struct 
net2272, gadget); 1149 1150 spin_lock_irqsave(&dev->lock, flags); 1151 tmp = net2272_read(dev, USBCTL0); 1152 dev->softconnect = (is_on != 0); 1153 if (is_on) 1154 tmp |= (1 << USB_DETECT_ENABLE); 1155 else 1156 tmp &= ~(1 << USB_DETECT_ENABLE); 1157 net2272_write(dev, USBCTL0, tmp); 1158 spin_unlock_irqrestore(&dev->lock, flags); 1159 1160 return 0; 1161 } 1162 1163 static int net2272_start(struct usb_gadget *_gadget, 1164 struct usb_gadget_driver *driver); 1165 static int net2272_stop(struct usb_gadget *_gadget); 1166 1167 static const struct usb_gadget_ops net2272_ops = { 1168 .get_frame = net2272_get_frame, 1169 .wakeup = net2272_wakeup, 1170 .set_selfpowered = net2272_set_selfpowered, 1171 .pullup = net2272_pullup, 1172 .udc_start = net2272_start, 1173 .udc_stop = net2272_stop, 1174 }; 1175 1176 /*---------------------------------------------------------------------------*/ 1177 1178 static ssize_t 1179 registers_show(struct device *_dev, struct device_attribute *attr, char *buf) 1180 { 1181 struct net2272 *dev; 1182 char *next; 1183 unsigned size, t; 1184 unsigned long flags; 1185 u8 t1, t2; 1186 int i; 1187 const char *s; 1188 1189 dev = dev_get_drvdata(_dev); 1190 next = buf; 1191 size = PAGE_SIZE; 1192 spin_lock_irqsave(&dev->lock, flags); 1193 1194 if (dev->driver) 1195 s = dev->driver->driver.name; 1196 else 1197 s = "(none)"; 1198 1199 /* Main Control Registers */ 1200 t = scnprintf(next, size, "%s version %s," 1201 "chiprev %02x, locctl %02x\n" 1202 "irqenb0 %02x irqenb1 %02x " 1203 "irqstat0 %02x irqstat1 %02x\n", 1204 driver_name, driver_vers, dev->chiprev, 1205 net2272_read(dev, LOCCTL), 1206 net2272_read(dev, IRQENB0), 1207 net2272_read(dev, IRQENB1), 1208 net2272_read(dev, IRQSTAT0), 1209 net2272_read(dev, IRQSTAT1)); 1210 size -= t; 1211 next += t; 1212 1213 /* DMA */ 1214 t1 = net2272_read(dev, DMAREQ); 1215 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n", 1216 t1, ep_name[(t1 & 0x01) + 1], 1217 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "", 1218 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "", 1219 t1 & (1 << DMA_REQUEST) ? "req " : "", 1220 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : ""); 1221 size -= t; 1222 next += t; 1223 1224 /* USB Control Registers */ 1225 t1 = net2272_read(dev, USBCTL1); 1226 if (t1 & (1 << VBUS_PIN)) { 1227 if (t1 & (1 << USB_HIGH_SPEED)) 1228 s = "high speed"; 1229 else if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1230 s = "powered"; 1231 else 1232 s = "full speed"; 1233 } else 1234 s = "not attached"; 1235 t = scnprintf(next, size, 1236 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n", 1237 net2272_read(dev, USBCTL0), t1, 1238 net2272_read(dev, OURADDR), s); 1239 size -= t; 1240 next += t; 1241 1242 /* Endpoint Registers */ 1243 for (i = 0; i < 4; ++i) { 1244 struct net2272_ep *ep; 1245 1246 ep = &dev->ep[i]; 1247 if (i && !ep->desc) 1248 continue; 1249 1250 t1 = net2272_ep_read(ep, EP_CFG); 1251 t2 = net2272_ep_read(ep, EP_RSPSET); 1252 t = scnprintf(next, size, 1253 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s" 1254 "irqenb %02x\n", 1255 ep->ep.name, t1, t2, 1256 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "", 1257 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "", 1258 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "", 1259 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "", 1260 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "", 1261 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "", 1262 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ", 1263 (t2 & (1 << ENDPOINT_HALT)) ? 
"HALT " : "", 1264 net2272_ep_read(ep, EP_IRQENB)); 1265 size -= t; 1266 next += t; 1267 1268 t = scnprintf(next, size, 1269 "\tstat0 %02x stat1 %02x avail %04x " 1270 "(ep%d%s-%s)%s\n", 1271 net2272_ep_read(ep, EP_STAT0), 1272 net2272_ep_read(ep, EP_STAT1), 1273 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), 1274 t1 & 0x0f, 1275 ep->is_in ? "in" : "out", 1276 type_string(t1 >> 5), 1277 ep->stopped ? "*" : ""); 1278 size -= t; 1279 next += t; 1280 1281 t = scnprintf(next, size, 1282 "\tep_transfer %06x\n", 1283 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | 1284 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | 1285 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); 1286 size -= t; 1287 next += t; 1288 1289 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; 1290 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; 1291 t = scnprintf(next, size, 1292 "\tbuf-a %s buf-b %s\n", 1293 buf_state_string(t1), 1294 buf_state_string(t2)); 1295 size -= t; 1296 next += t; 1297 } 1298 1299 spin_unlock_irqrestore(&dev->lock, flags); 1300 1301 return PAGE_SIZE - size; 1302 } 1303 static DEVICE_ATTR_RO(registers); 1304 1305 /*---------------------------------------------------------------------------*/ 1306 1307 static void 1308 net2272_set_fifo_mode(struct net2272 *dev, int mode) 1309 { 1310 u8 tmp; 1311 1312 tmp = net2272_read(dev, LOCCTL) & 0x3f; 1313 tmp |= (mode << 6); 1314 net2272_write(dev, LOCCTL, tmp); 1315 1316 INIT_LIST_HEAD(&dev->gadget.ep_list); 1317 1318 /* always ep-a, ep-c ... maybe not ep-b */ 1319 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1320 1321 switch (mode) { 1322 case 0: 1323 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1324 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; 1325 break; 1326 case 1: 1327 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1328 dev->ep[1].fifo_size = 1024; 1329 dev->ep[2].fifo_size = 512; 1330 break; 1331 case 2: 1332 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1333 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1334 break; 1335 case 3: 1336 dev->ep[1].fifo_size = 1024; 1337 break; 1338 } 1339 1340 /* ep-c is always 2 512 byte buffers */ 1341 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1342 dev->ep[3].fifo_size = 512; 1343 } 1344 1345 /*---------------------------------------------------------------------------*/ 1346 1347 static void 1348 net2272_usb_reset(struct net2272 *dev) 1349 { 1350 dev->gadget.speed = USB_SPEED_UNKNOWN; 1351 1352 net2272_cancel_dma(dev); 1353 1354 net2272_write(dev, IRQENB0, 0); 1355 net2272_write(dev, IRQENB1, 0); 1356 1357 /* clear irq state */ 1358 net2272_write(dev, IRQSTAT0, 0xff); 1359 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT)); 1360 1361 net2272_write(dev, DMAREQ, 1362 (0 << DMA_BUFFER_VALID) | 1363 (0 << DMA_REQUEST_ENABLE) | 1364 (1 << DMA_CONTROL_DACK) | 1365 (dev->dma_eot_polarity << EOT_POLARITY) | 1366 (dev->dma_dack_polarity << DACK_POLARITY) | 1367 (dev->dma_dreq_polarity << DREQ_POLARITY) | 1368 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT)); 1369 1370 net2272_cancel_dma(dev); 1371 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0); 1372 1373 /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping 1374 * note that the higher level gadget drivers are expected to convert data to little endian. 
1375 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here 1376 */ 1377 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH)); 1378 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE)); 1379 } 1380 1381 static void 1382 net2272_usb_reinit(struct net2272 *dev) 1383 { 1384 int i; 1385 1386 /* basic endpoint init */ 1387 for (i = 0; i < 4; ++i) { 1388 struct net2272_ep *ep = &dev->ep[i]; 1389 1390 ep->ep.name = ep_name[i]; 1391 ep->dev = dev; 1392 ep->num = i; 1393 ep->not_empty = 0; 1394 1395 if (use_dma && ep->num == dma_ep) 1396 ep->dma = 1; 1397 1398 if (i > 0 && i <= 3) 1399 ep->fifo_size = 512; 1400 else 1401 ep->fifo_size = 64; 1402 net2272_ep_reset(ep); 1403 1404 if (i == 0) { 1405 ep->ep.caps.type_control = true; 1406 } else { 1407 ep->ep.caps.type_iso = true; 1408 ep->ep.caps.type_bulk = true; 1409 ep->ep.caps.type_int = true; 1410 } 1411 1412 ep->ep.caps.dir_in = true; 1413 ep->ep.caps.dir_out = true; 1414 } 1415 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 1416 1417 dev->gadget.ep0 = &dev->ep[0].ep; 1418 dev->ep[0].stopped = 0; 1419 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 1420 } 1421 1422 static void 1423 net2272_ep0_start(struct net2272 *dev) 1424 { 1425 struct net2272_ep *ep0 = &dev->ep[0]; 1426 1427 net2272_ep_write(ep0, EP_RSPSET, 1428 (1 << NAK_OUT_PACKETS_MODE) | 1429 (1 << ALT_NAK_OUT_PACKETS)); 1430 net2272_ep_write(ep0, EP_RSPCLR, 1431 (1 << HIDE_STATUS_PHASE) | 1432 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)); 1433 net2272_write(dev, USBCTL0, 1434 (dev->softconnect << USB_DETECT_ENABLE) | 1435 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) | 1436 (1 << IO_WAKEUP_ENABLE)); 1437 net2272_write(dev, IRQENB0, 1438 (1 << SETUP_PACKET_INTERRUPT_ENABLE) | 1439 (1 << ENDPOINT_0_INTERRUPT_ENABLE) | 1440 (1 << DMA_DONE_INTERRUPT_ENABLE)); 1441 net2272_write(dev, IRQENB1, 1442 (1 << VBUS_INTERRUPT_ENABLE) | 1443 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | 1444 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)); 1445 } 1446 1447 /* when a driver is successfully registered, it will receive 1448 * control requests including set_configuration(), which enables 1449 * non-control requests. then usb traffic follows until a 1450 * disconnect is reported. then a host may connect again, or 1451 * the driver might get unbound. 1452 */ 1453 static int net2272_start(struct usb_gadget *_gadget, 1454 struct usb_gadget_driver *driver) 1455 { 1456 struct net2272 *dev; 1457 unsigned i; 1458 1459 if (!driver || !driver->setup || 1460 driver->max_speed != USB_SPEED_HIGH) 1461 return -EINVAL; 1462 1463 dev = container_of(_gadget, struct net2272, gadget); 1464 1465 for (i = 0; i < 4; ++i) 1466 dev->ep[i].irqs = 0; 1467 /* hook up the driver ... */ 1468 dev->softconnect = 1; 1469 driver->driver.bus = NULL; 1470 dev->driver = driver; 1471 1472 /* ... then enable host detection and ep0; and we're ready 1473 * for set_configuration as well as eventual disconnect. 1474 */ 1475 net2272_ep0_start(dev); 1476 1477 return 0; 1478 } 1479 1480 static void 1481 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) 1482 { 1483 int i; 1484 1485 /* don't disconnect if it's not connected */ 1486 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1487 driver = NULL; 1488 1489 /* stop hardware; prevent new request submissions; 1490 * and kill any outstanding requests. 
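* (net2272_usb_reset() also masks both interrupt banks and aborts any DMA in progress)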
1491 */ 1492 net2272_usb_reset(dev); 1493 for (i = 0; i < 4; ++i) 1494 net2272_dequeue_all(&dev->ep[i]); 1495 1496 /* report disconnect; the driver is already quiesced */ 1497 if (driver) { 1498 spin_unlock(&dev->lock); 1499 driver->disconnect(&dev->gadget); 1500 spin_lock(&dev->lock); 1501 } 1502 1503 net2272_usb_reinit(dev); 1504 } 1505 1506 static int net2272_stop(struct usb_gadget *_gadget) 1507 { 1508 struct net2272 *dev; 1509 unsigned long flags; 1510 1511 dev = container_of(_gadget, struct net2272, gadget); 1512 1513 spin_lock_irqsave(&dev->lock, flags); 1514 stop_activity(dev, NULL); 1515 spin_unlock_irqrestore(&dev->lock, flags); 1516 1517 dev->driver = NULL; 1518 1519 return 0; 1520 } 1521 1522 /*---------------------------------------------------------------------------*/ 1523 /* handle ep-a/ep-b dma completions */ 1524 static void 1525 net2272_handle_dma(struct net2272_ep *ep) 1526 { 1527 struct net2272_request *req; 1528 unsigned len; 1529 int status; 1530 1531 if (!list_empty(&ep->queue)) 1532 req = list_entry(ep->queue.next, 1533 struct net2272_request, queue); 1534 else 1535 req = NULL; 1536 1537 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); 1538 1539 /* Ensure DREQ is de-asserted */ 1540 net2272_write(ep->dev, DMAREQ, 1541 (0 << DMA_BUFFER_VALID) 1542 | (0 << DMA_REQUEST_ENABLE) 1543 | (1 << DMA_CONTROL_DACK) 1544 | (ep->dev->dma_eot_polarity << EOT_POLARITY) 1545 | (ep->dev->dma_dack_polarity << DACK_POLARITY) 1546 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY) 1547 | (ep->dma << DMA_ENDPOINT_SELECT)); 1548 1549 ep->dev->dma_busy = 0; 1550 1551 net2272_ep_write(ep, EP_IRQENB, 1552 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 1553 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 1554 | net2272_ep_read(ep, EP_IRQENB)); 1555 1556 /* device-to-host transfer completed */ 1557 if (ep->is_in) { 1558 /* validate a short packet or zlp if necessary */ 1559 if ((req->req.length % ep->ep.maxpacket != 0) || 1560 req->req.zero) 1561 set_fifo_bytecount(ep, 0); 1562 1563 net2272_done(ep, req, 0); 1564 if (!list_empty(&ep->queue)) { 1565 req = list_entry(ep->queue.next, 1566 struct net2272_request, queue); 1567 status = net2272_kick_dma(ep, req); 1568 if (status < 0) 1569 net2272_pio_advance(ep); 1570 } 1571 1572 /* host-to-device transfer completed */ 1573 } else { 1574 /* terminated with a short packet? */ 1575 if (net2272_read(ep->dev, IRQSTAT0) & 1576 (1 << DMA_DONE_INTERRUPT)) { 1577 /* abort system dma */ 1578 net2272_cancel_dma(ep->dev); 1579 } 1580 1581 /* EP_TRANSFER will contain the number of bytes 1582 * actually received. 1583 * NOTE: There is no overflow detection on EP_TRANSFER: 1584 * We can't deal with transfers larger than 2^24 bytes! 
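* (net2272_request_dma() already rejects OUT transfers longer than that)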
1585 */ 1586 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16) 1587 | (net2272_ep_read(ep, EP_TRANSFER1) << 8) 1588 | (net2272_ep_read(ep, EP_TRANSFER0)); 1589 1590 if (ep->not_empty) 1591 len += 4; 1592 1593 req->req.actual += len; 1594 1595 /* get any remaining data */ 1596 net2272_pio_advance(ep); 1597 } 1598 } 1599 1600 /*---------------------------------------------------------------------------*/ 1601 1602 static void 1603 net2272_handle_ep(struct net2272_ep *ep) 1604 { 1605 struct net2272_request *req; 1606 u8 stat0, stat1; 1607 1608 if (!list_empty(&ep->queue)) 1609 req = list_entry(ep->queue.next, 1610 struct net2272_request, queue); 1611 else 1612 req = NULL; 1613 1614 /* ack all, and handle what we care about */ 1615 stat0 = net2272_ep_read(ep, EP_STAT0); 1616 stat1 = net2272_ep_read(ep, EP_STAT1); 1617 ep->irqs++; 1618 1619 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", 1620 ep->ep.name, stat0, stat1, req ? &req->req : NULL); 1621 1622 net2272_ep_write(ep, EP_STAT0, stat0 & 1623 ~((1 << NAK_OUT_PACKETS) 1624 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))); 1625 net2272_ep_write(ep, EP_STAT1, stat1); 1626 1627 /* data packet(s) received (in the fifo, OUT) 1628 * direction must be validated, otherwise control read status phase 1629 * could be interpreted as a valid packet 1630 */ 1631 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT))) 1632 net2272_pio_advance(ep); 1633 /* data packet(s) transmitted (IN) */ 1634 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) 1635 net2272_pio_advance(ep); 1636 } 1637 1638 static struct net2272_ep * 1639 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex) 1640 { 1641 struct net2272_ep *ep; 1642 1643 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 1644 return &dev->ep[0]; 1645 1646 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { 1647 u8 bEndpointAddress; 1648 1649 if (!ep->desc) 1650 continue; 1651 bEndpointAddress = ep->desc->bEndpointAddress; 1652 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 1653 continue; 1654 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) 1655 return ep; 1656 } 1657 return NULL; 1658 } 1659 1660 /* 1661 * USB Test Packet: 1662 * JKJKJKJK * 9 1663 * JJKKJJKK * 8 1664 * JJJJKKKK * 8 1665 * JJJJJJJKKKKKKK * 8 1666 * JJJJJJJK * 8 1667 * {JKKKKKKK * 10}, JK 1668 */ 1669 static const u8 net2272_test_packet[] = { 1670 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1671 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 1672 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 1673 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 1674 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 1675 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E 1676 }; 1677 1678 static void 1679 net2272_set_test_mode(struct net2272 *dev, int mode) 1680 { 1681 int i; 1682 1683 /* Disable all net2272 interrupts: 1684 * Nothing but a power cycle should stop the test. 
1685 */ 1686 net2272_write(dev, IRQENB0, 0x00); 1687 net2272_write(dev, IRQENB1, 0x00); 1688 1689 /* Force transceiver to high-speed */ 1690 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED); 1691 1692 net2272_write(dev, PAGESEL, 0); 1693 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT); 1694 net2272_write(dev, EP_RSPCLR, 1695 (1 << CONTROL_STATUS_PHASE_HANDSHAKE) 1696 | (1 << HIDE_STATUS_PHASE)); 1697 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION); 1698 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH); 1699 1700 /* wait for status phase to complete */ 1701 while (!(net2272_read(dev, EP_STAT0) & 1702 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))) 1703 ; 1704 1705 /* Enable test mode */ 1706 net2272_write(dev, USBTEST, mode); 1707 1708 /* load test packet */ 1709 if (mode == TEST_PACKET) { 1710 /* switch to 8 bit mode */ 1711 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) & 1712 ~(1 << DATA_WIDTH)); 1713 1714 for (i = 0; i < sizeof(net2272_test_packet); ++i) 1715 net2272_write(dev, EP_DATA, net2272_test_packet[i]); 1716 1717 /* Validate test packet */ 1718 net2272_write(dev, EP_TRANSFER0, 0); 1719 } 1720 } 1721 1722 static void 1723 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat) 1724 { 1725 struct net2272_ep *ep; 1726 u8 num, scratch; 1727 1728 /* starting a control request? */ 1729 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) { 1730 union { 1731 u8 raw[8]; 1732 struct usb_ctrlrequest r; 1733 } u; 1734 int tmp = 0; 1735 struct net2272_request *req; 1736 1737 if (dev->gadget.speed == USB_SPEED_UNKNOWN) { 1738 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED)) 1739 dev->gadget.speed = USB_SPEED_HIGH; 1740 else 1741 dev->gadget.speed = USB_SPEED_FULL; 1742 dev_dbg(dev->dev, "%s\n", 1743 usb_speed_string(dev->gadget.speed)); 1744 } 1745 1746 ep = &dev->ep[0]; 1747 ep->irqs++; 1748 1749 /* make sure any leftover interrupt state is cleared */ 1750 stat &= ~(1 << ENDPOINT_0_INTERRUPT); 1751 while (!list_empty(&ep->queue)) { 1752 req = list_entry(ep->queue.next, 1753 struct net2272_request, queue); 1754 net2272_done(ep, req, 1755 (req->req.actual == req->req.length) ? 0 : -EPROTO); 1756 } 1757 ep->stopped = 0; 1758 dev->protocol_stall = 0; 1759 net2272_ep_write(ep, EP_STAT0, 1760 (1 << DATA_IN_TOKEN_INTERRUPT) 1761 | (1 << DATA_OUT_TOKEN_INTERRUPT) 1762 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) 1763 | (1 << DATA_PACKET_RECEIVED_INTERRUPT) 1764 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); 1765 net2272_ep_write(ep, EP_STAT1, 1766 (1 << TIMEOUT) 1767 | (1 << USB_OUT_ACK_SENT) 1768 | (1 << USB_OUT_NAK_SENT) 1769 | (1 << USB_IN_ACK_RCVD) 1770 | (1 << USB_IN_NAK_SENT) 1771 | (1 << USB_STALL_SENT) 1772 | (1 << LOCAL_OUT_ZLP)); 1773 1774 /* 1775 * Ensure Control Read pre-validation setting is beyond maximum size 1776 * - Control Writes can leave non-zero values in EP_TRANSFER. If 1777 * an EP0 transfer following the Control Write is a Control Read, 1778 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected 1779 * pre-validation count.
1780 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures 1781 * the pre-validation count cannot cause an unexpected validation 1782 */ 1783 net2272_write(dev, PAGESEL, 0); 1784 net2272_write(dev, EP_TRANSFER2, 0xff); 1785 net2272_write(dev, EP_TRANSFER1, 0xff); 1786 net2272_write(dev, EP_TRANSFER0, 0xff); 1787 1788 u.raw[0] = net2272_read(dev, SETUP0); 1789 u.raw[1] = net2272_read(dev, SETUP1); 1790 u.raw[2] = net2272_read(dev, SETUP2); 1791 u.raw[3] = net2272_read(dev, SETUP3); 1792 u.raw[4] = net2272_read(dev, SETUP4); 1793 u.raw[5] = net2272_read(dev, SETUP5); 1794 u.raw[6] = net2272_read(dev, SETUP6); 1795 u.raw[7] = net2272_read(dev, SETUP7); 1796 /* 1797 * If you have a big endian cpu make sure le16_to_cpus 1798 * performs the proper byte swapping here... 1799 */ 1800 le16_to_cpus(&u.r.wValue); 1801 le16_to_cpus(&u.r.wIndex); 1802 le16_to_cpus(&u.r.wLength); 1803 1804 /* ack the irq */ 1805 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT); 1806 stat ^= (1 << SETUP_PACKET_INTERRUPT); 1807 1808 /* watch control traffic at the token level, and force 1809 * synchronization before letting the status phase happen. 1810 */ 1811 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; 1812 if (ep->is_in) { 1813 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 1814 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) 1815 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); 1816 stop_out_naking(ep); 1817 } else 1818 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 1819 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) 1820 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); 1821 net2272_ep_write(ep, EP_IRQENB, scratch); 1822 1823 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) 1824 goto delegate; 1825 switch (u.r.bRequest) { 1826 case USB_REQ_GET_STATUS: { 1827 struct net2272_ep *e; 1828 u16 status = 0; 1829 1830 switch (u.r.bRequestType & USB_RECIP_MASK) { 1831 case USB_RECIP_ENDPOINT: 1832 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1833 if (!e || u.r.wLength > 2) 1834 goto do_stall; 1835 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT)) 1836 status = cpu_to_le16(1); 1837 else 1838 status = cpu_to_le16(0); 1839 1840 /* don't bother with a request object! */ 1841 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1842 writew(status, net2272_reg_addr(dev, EP_DATA)); 1843 set_fifo_bytecount(&dev->ep[0], 0); 1844 allow_status(ep); 1845 dev_vdbg(dev->dev, "%s stat %02x\n", 1846 ep->ep.name, status); 1847 goto next_endpoints; 1848 case USB_RECIP_DEVICE: 1849 if (u.r.wLength > 2) 1850 goto do_stall; 1851 if (dev->gadget.is_selfpowered) 1852 status = (1 << USB_DEVICE_SELF_POWERED); 1853 1854 /* don't bother with a request object! */ 1855 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1856 writew(status, net2272_reg_addr(dev, EP_DATA)); 1857 set_fifo_bytecount(&dev->ep[0], 0); 1858 allow_status(ep); 1859 dev_vdbg(dev->dev, "device stat %02x\n", status); 1860 goto next_endpoints; 1861 case USB_RECIP_INTERFACE: 1862 if (u.r.wLength > 2) 1863 goto do_stall; 1864 1865 /* don't bother with a request object!
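* the two status bytes are written straight into the ep0 fifo instead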
*/ 1866 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1867 writew(status, net2272_reg_addr(dev, EP_DATA)); 1868 set_fifo_bytecount(&dev->ep[0], 0); 1869 allow_status(ep); 1870 dev_vdbg(dev->dev, "interface status %02x\n", status); 1871 goto next_endpoints; 1872 } 1873 1874 break; 1875 } 1876 case USB_REQ_CLEAR_FEATURE: { 1877 struct net2272_ep *e; 1878 1879 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 1880 goto delegate; 1881 if (u.r.wValue != USB_ENDPOINT_HALT || 1882 u.r.wLength != 0) 1883 goto do_stall; 1884 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1885 if (!e) 1886 goto do_stall; 1887 if (e->wedged) { 1888 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n", 1889 ep->ep.name); 1890 } else { 1891 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name); 1892 clear_halt(e); 1893 } 1894 allow_status(ep); 1895 goto next_endpoints; 1896 } 1897 case USB_REQ_SET_FEATURE: { 1898 struct net2272_ep *e; 1899 1900 if (u.r.bRequestType == USB_RECIP_DEVICE) { 1901 if (u.r.wIndex != NORMAL_OPERATION) 1902 net2272_set_test_mode(dev, (u.r.wIndex >> 8)); 1903 allow_status(ep); 1904 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex); 1905 goto next_endpoints; 1906 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT) 1907 goto delegate; 1908 if (u.r.wValue != USB_ENDPOINT_HALT || 1909 u.r.wLength != 0) 1910 goto do_stall; 1911 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1912 if (!e) 1913 goto do_stall; 1914 set_halt(e); 1915 allow_status(ep); 1916 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name); 1917 goto next_endpoints; 1918 } 1919 case USB_REQ_SET_ADDRESS: { 1920 net2272_write(dev, OURADDR, u.r.wValue & 0xff); 1921 allow_status(ep); 1922 break; 1923 } 1924 default: 1925 delegate: 1926 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x " 1927 "ep_cfg %08x\n", 1928 u.r.bRequestType, u.r.bRequest, 1929 u.r.wValue, u.r.wIndex, 1930 net2272_ep_read(ep, EP_CFG)); 1931 spin_unlock(&dev->lock); 1932 tmp = dev->driver->setup(&dev->gadget, &u.r); 1933 spin_lock(&dev->lock); 1934 } 1935 1936 /* stall ep0 on error */ 1937 if (tmp < 0) { 1938 do_stall: 1939 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n", 1940 u.r.bRequestType, u.r.bRequest, tmp); 1941 dev->protocol_stall = 1; 1942 } 1943 /* endpoint dma irq? */ 1944 } else if (stat & (1 << DMA_DONE_INTERRUPT)) { 1945 net2272_cancel_dma(dev); 1946 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT); 1947 stat &= ~(1 << DMA_DONE_INTERRUPT); 1948 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT)) 1949 ? 2 : 1; 1950 1951 ep = &dev->ep[num]; 1952 net2272_handle_dma(ep); 1953 } 1954 1955 next_endpoints: 1956 /* endpoint data irq? */ 1957 scratch = stat & 0x0f; 1958 stat &= ~0x0f; 1959 for (num = 0; scratch; num++) { 1960 u8 t; 1961 1962 /* does this endpoint's FIFO and queue need tending? */ 1963 t = 1 << num; 1964 if ((scratch & t) == 0) 1965 continue; 1966 scratch ^= t; 1967 1968 ep = &dev->ep[num]; 1969 net2272_handle_ep(ep); 1970 } 1971 1972 /* some interrupts we can just ignore */ 1973 stat &= ~(1 << SOF_INTERRUPT); 1974 1975 if (stat) 1976 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat); 1977 } 1978 1979 static void 1980 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat) 1981 { 1982 u8 tmp, mask; 1983 1984 /* after disconnect there's nothing else to do! 
*/ 1985 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); 1986 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED); 1987 1988 if (stat & tmp) { 1989 bool reset = false; 1990 bool disconnect = false; 1991 1992 /* 1993 * Ignore disconnects and resets if the speed hasn't been set. 1994 * VBUS can bounce and there's always an initial reset. 1995 */ 1996 net2272_write(dev, IRQSTAT1, tmp); 1997 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 1998 if ((stat & (1 << VBUS_INTERRUPT)) && 1999 (net2272_read(dev, USBCTL1) & 2000 (1 << VBUS_PIN)) == 0) { 2001 disconnect = true; 2002 dev_dbg(dev->dev, "disconnect %s\n", 2003 dev->driver->driver.name); 2004 } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) && 2005 (net2272_read(dev, USBCTL1) & mask) 2006 == 0) { 2007 reset = true; 2008 dev_dbg(dev->dev, "reset %s\n", 2009 dev->driver->driver.name); 2010 } 2011 2012 if (disconnect || reset) { 2013 stop_activity(dev, dev->driver); 2014 net2272_ep0_start(dev); 2015 spin_unlock(&dev->lock); 2016 if (reset) 2017 usb_gadget_udc_reset 2018 (&dev->gadget, dev->driver); 2019 else 2020 (dev->driver->disconnect) 2021 (&dev->gadget); 2022 spin_lock(&dev->lock); 2023 return; 2024 } 2025 } 2026 stat &= ~tmp; 2027 2028 if (!stat) 2029 return; 2030 } 2031 2032 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT); 2033 if (stat & tmp) { 2034 net2272_write(dev, IRQSTAT1, tmp); 2035 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) { 2036 if (dev->driver->suspend) 2037 dev->driver->suspend(&dev->gadget); 2038 if (!enable_suspend) { 2039 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT); 2040 dev_dbg(dev->dev, "Suspend disabled, ignoring\n"); 2041 } 2042 } else { 2043 if (dev->driver->resume) 2044 dev->driver->resume(&dev->gadget); 2045 } 2046 stat &= ~tmp; 2047 } 2048 2049 /* clear any other status/irqs */ 2050 if (stat) 2051 net2272_write(dev, IRQSTAT1, stat); 2052 2053 /* some status we can just ignore */ 2054 stat &= ~((1 << CONTROL_STATUS_INTERRUPT) 2055 | (1 << SUSPEND_REQUEST_INTERRUPT) 2056 | (1 << RESUME_INTERRUPT)); 2057 if (!stat) 2058 return; 2059 else 2060 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat); 2061 } 2062 2063 static irqreturn_t net2272_irq(int irq, void *_dev) 2064 { 2065 struct net2272 *dev = _dev; 2066 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2) 2067 u32 intcsr; 2068 #endif 2069 #if defined(PLX_PCI_RDK) 2070 u8 dmareq; 2071 #endif 2072 spin_lock(&dev->lock); 2073 #if defined(PLX_PCI_RDK) 2074 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); 2075 2076 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) { 2077 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE), 2078 dev->rdk1.plx9054_base_addr + INTCSR); 2079 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1)); 2080 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0)); 2081 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); 2082 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE), 2083 dev->rdk1.plx9054_base_addr + INTCSR); 2084 } 2085 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) { 2086 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2087 dev->rdk1.plx9054_base_addr + DMACSR0); 2088 2089 dmareq = net2272_read(dev, DMAREQ); 2090 if (dmareq & 0x01) 2091 net2272_handle_dma(&dev->ep[2]); 2092 else 2093 net2272_handle_dma(&dev->ep[1]); 2094 } 2095 #endif 2096 #if defined(PLX_PCI_RDK2) 2097 /* see if this PCI interrupt is ours by checking the FPGA irqstat */ 2098 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2099 if (!(intcsr & (1 << NET2272_PCI_IRQ))) { 2100 spin_unlock(&dev->lock); 2101 return IRQ_NONE; 2102 } 
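/* The RDK2 FPGA's PCI interrupt line may be shared, so the IRQSTAT bit test above is what decides between bailing out with IRQ_NONE and falling through to the normal IRQSTAT1/IRQSTAT0 handlers below. */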
2103 /* check dma interrupts */ 2104 #endif 2105 /* Platform/device interrupt handler */ 2106 #if !defined(PLX_PCI_RDK) 2107 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1)); 2108 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0)); 2109 #endif 2110 spin_unlock(&dev->lock); 2111 2112 return IRQ_HANDLED; 2113 } 2114 2115 static int net2272_present(struct net2272 *dev) 2116 { 2117 /* 2118 * Quick test to see if the CPU can communicate properly with the NET2272. 2119 * Verifies the connection using writes and reads to write/read and 2120 * read-only registers. 2121 * 2122 * This routine is strongly recommended, especially during early bring-up 2123 * of new hardware; for designs that do not apply Power On System 2124 * Tests (POST) it may be discarded (or perhaps minimized). 2125 */ 2126 unsigned int ii; 2127 u8 val, refval; 2128 2129 /* Verify the NET2272 SCRATCH register can be written and read back */ 2130 refval = net2272_read(dev, SCRATCH); 2131 for (ii = 0; ii < 0x100; ii += 7) { 2132 net2272_write(dev, SCRATCH, ii); 2133 val = net2272_read(dev, SCRATCH); 2134 if (val != ii) { 2135 dev_dbg(dev->dev, 2136 "%s: write/read SCRATCH register test failed: " 2137 "wrote:0x%2.2x, read:0x%2.2x\n", 2138 __func__, ii, val); 2139 return -EINVAL; 2140 } 2141 } 2142 /* To be nice, we write the original SCRATCH value back: */ 2143 net2272_write(dev, SCRATCH, refval); 2144 2145 /* Verify the NET2272 CHIPREV register is read-only: */ 2146 refval = net2272_read(dev, CHIPREV_2272); 2147 for (ii = 0; ii < 0x100; ii += 7) { 2148 net2272_write(dev, CHIPREV_2272, ii); 2149 val = net2272_read(dev, CHIPREV_2272); 2150 if (val != refval) { 2151 dev_dbg(dev->dev, 2152 "%s: write/read CHIPREV register test failed: " 2153 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n", 2154 __func__, ii, val, refval); 2155 return -EINVAL; 2156 } 2157 } 2158 2159 /* 2160 * Verify NET2272's "NET2270 legacy revision" register 2161 * - NET2272 has two revision registers. The NET2270 legacy revision 2162 * register should read the same value, regardless of the NET2272 2163 * silicon revision. The legacy register applies to NET2270 2164 * firmware being applied to the NET2272. 2165 */ 2166 val = net2272_read(dev, CHIPREV_LEGACY); 2167 if (val != NET2270_LEGACY_REV) { 2168 /* 2169 * Unexpected legacy revision value 2170 * - Perhaps the chip is a NET2270? 2171 */ 2172 dev_dbg(dev->dev, 2173 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n" 2174 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. 
(Not NET2272?)\n", 2175 __func__, NET2270_LEGACY_REV, val); 2176 return -EINVAL; 2177 } 2178 2179 /* 2180 * Verify NET2272 silicon revision 2181 * - This revision register is appropriate for the silicon version 2182 * of the NET2272 2183 */ 2184 val = net2272_read(dev, CHIPREV_2272); 2185 switch (val) { 2186 case CHIPREV_NET2272_R1: 2187 /* 2188 * NET2272 Rev 1 has DMA related errata: 2189 * - Newer silicon (Rev 1A or better) required 2190 */ 2191 dev_dbg(dev->dev, 2192 "%s: Rev 1 detected: newer silicon recommended for DMA support\n", 2193 __func__); 2194 break; 2195 case CHIPREV_NET2272_R1A: 2196 break; 2197 default: 2198 /* NET2272 silicon version *may* not work with this firmware */ 2199 dev_dbg(dev->dev, 2200 "%s: unexpected silicon revision register value: " 2201 " CHIPREV_2272: 0x%2.2x\n", 2202 __func__, val); 2203 /* 2204 * Return Success, even though the chip rev is not an expected value 2205 * - Older, pre-built firmware can attempt to operate on newer silicon 2206 * - Often, new silicon is perfectly compatible 2207 */ 2208 } 2209 2210 /* Success: NET2272 checks out OK */ 2211 return 0; 2212 } 2213 2214 static void 2215 net2272_gadget_release(struct device *_dev) 2216 { 2217 struct net2272 *dev = dev_get_drvdata(_dev); 2218 kfree(dev); 2219 } 2220 2221 /*---------------------------------------------------------------------------*/ 2222 2223 static void 2224 net2272_remove(struct net2272 *dev) 2225 { 2226 usb_del_gadget_udc(&dev->gadget); 2227 free_irq(dev->irq, dev); 2228 iounmap(dev->base_addr); 2229 device_remove_file(dev->dev, &dev_attr_registers); 2230 2231 dev_info(dev->dev, "unbind\n"); 2232 } 2233 2234 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq) 2235 { 2236 struct net2272 *ret; 2237 2238 if (!irq) { 2239 dev_dbg(dev, "No IRQ!\n"); 2240 return ERR_PTR(-ENODEV); 2241 } 2242 2243 /* alloc, and start init */ 2244 ret = kzalloc(sizeof(*ret), GFP_KERNEL); 2245 if (!ret) 2246 return ERR_PTR(-ENOMEM); 2247 2248 spin_lock_init(&ret->lock); 2249 ret->irq = irq; 2250 ret->dev = dev; 2251 ret->gadget.ops = &net2272_ops; 2252 ret->gadget.max_speed = USB_SPEED_HIGH; 2253 2254 /* the "gadget" abstracts/virtualizes the controller */ 2255 ret->gadget.name = driver_name; 2256 2257 return ret; 2258 } 2259 2260 static int 2261 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) 2262 { 2263 int ret; 2264 2265 /* See if there... 
*/ 2266 if (net2272_present(dev)) { 2267 dev_warn(dev->dev, "2272 not found!\n"); 2268 ret = -ENODEV; 2269 goto err; 2270 } 2271 2272 net2272_usb_reset(dev); 2273 net2272_usb_reinit(dev); 2274 2275 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev); 2276 if (ret) { 2277 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq); 2278 goto err; 2279 } 2280 2281 dev->chiprev = net2272_read(dev, CHIPREV_2272); 2282 2283 /* done */ 2284 dev_info(dev->dev, "%s\n", driver_desc); 2285 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n", 2286 dev->irq, dev->base_addr, dev->chiprev, 2287 dma_mode_string()); 2288 dev_info(dev->dev, "version: %s\n", driver_vers); 2289 2290 ret = device_create_file(dev->dev, &dev_attr_registers); 2291 if (ret) 2292 goto err_irq; 2293 2294 ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget, 2295 net2272_gadget_release); 2296 if (ret) 2297 goto err_add_udc; 2298 2299 return 0; 2300 2301 err_add_udc: 2302 device_remove_file(dev->dev, &dev_attr_registers); 2303 err_irq: 2304 free_irq(dev->irq, dev); 2305 err: 2306 return ret; 2307 } 2308 2309 #ifdef CONFIG_PCI 2310 2311 /* 2312 * wrap this driver around the specified device, but 2313 * don't respond over USB until a gadget driver binds to us 2314 */ 2315 2316 static int 2317 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) 2318 { 2319 unsigned long resource, len, tmp; 2320 void __iomem *mem_mapped_addr[4]; 2321 int ret, i; 2322 2323 /* 2324 * BAR 0 holds PLX 9054 config registers 2325 * BAR 1 is i/o memory; unused here 2326 * BAR 2 holds EPLD config registers 2327 * BAR 3 holds NET2272 registers 2328 */ 2329 2330 /* Find and map all address spaces */ 2331 for (i = 0; i < 4; ++i) { 2332 if (i == 1) 2333 continue; /* BAR1 unused */ 2334 2335 resource = pci_resource_start(pdev, i); 2336 len = pci_resource_len(pdev, i); 2337 2338 if (!request_mem_region(resource, len, driver_name)) { 2339 dev_dbg(dev->dev, "controller already in use\n"); 2340 ret = -EBUSY; 2341 goto err; 2342 } 2343 2344 mem_mapped_addr[i] = ioremap_nocache(resource, len); 2345 if (mem_mapped_addr[i] == NULL) { 2346 release_mem_region(resource, len); 2347 dev_dbg(dev->dev, "can't map memory\n"); 2348 ret = -EFAULT; 2349 goto err; 2350 } 2351 } 2352 2353 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0]; 2354 dev->rdk1.epld_base_addr = mem_mapped_addr[2]; 2355 dev->base_addr = mem_mapped_addr[3]; 2356 2357 /* Set PLX 9054 bus width (16 bits) */ 2358 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1); 2359 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT, 2360 dev->rdk1.plx9054_base_addr + LBRD1); 2361 2362 /* Enable PLX 9054 Interrupts */ 2363 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) | 2364 (1 << PCI_INTERRUPT_ENABLE) | 2365 (1 << LOCAL_INTERRUPT_INPUT_ENABLE), 2366 dev->rdk1.plx9054_base_addr + INTCSR); 2367 2368 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2369 dev->rdk1.plx9054_base_addr + DMACSR0); 2370 2371 /* reset */ 2372 writeb((1 << EPLD_DMA_ENABLE) | 2373 (1 << DMA_CTL_DACK) | 2374 (1 << DMA_TIMEOUT_ENABLE) | 2375 (1 << USER) | 2376 (0 << MPX_MODE) | 2377 (1 << BUSWIDTH) | 2378 (1 << NET2272_RESET), 2379 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2380 2381 mb(); 2382 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) & 2383 ~(1 << NET2272_RESET), 2384 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2385 udelay(200); 2386 2387 return 0; 2388 2389 err: 2390 while (--i >= 0) { 2391 iounmap(mem_mapped_addr[i]); 2392 release_mem_region(pci_resource_start(pdev, i), 2393 
pci_resource_len(pdev, i)); 2394 } 2395 2396 return ret; 2397 } 2398 2399 static int 2400 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev) 2401 { 2402 unsigned long resource, len; 2403 void __iomem *mem_mapped_addr[2]; 2404 int ret, i; 2405 2406 /* 2407 * BAR 0 holds FPGA config registers 2408 * BAR 1 holds NET2272 registers 2409 */ 2410 2411 /* Find and map all address spaces; BARs 2-3 are unused on the RDK2 */ 2412 for (i = 0; i < 2; ++i) { 2413 resource = pci_resource_start(pdev, i); 2414 len = pci_resource_len(pdev, i); 2415 2416 if (!request_mem_region(resource, len, driver_name)) { 2417 dev_dbg(dev->dev, "controller already in use\n"); 2418 ret = -EBUSY; 2419 goto err; 2420 } 2421 2422 mem_mapped_addr[i] = ioremap_nocache(resource, len); 2423 if (mem_mapped_addr[i] == NULL) { 2424 release_mem_region(resource, len); 2425 dev_dbg(dev->dev, "can't map memory\n"); 2426 ret = -EFAULT; 2427 goto err; 2428 } 2429 } 2430 2431 dev->rdk2.fpga_base_addr = mem_mapped_addr[0]; 2432 dev->base_addr = mem_mapped_addr[1]; 2433 2434 mb(); 2435 /* Set 2272 bus width (16 bits) and reset */ 2436 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK); 2437 udelay(200); 2438 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK); 2439 /* Print FPGA version number */ 2440 dev_info(dev->dev, "RDK2 FPGA version %08x\n", 2441 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV)); 2442 /* Enable FPGA Interrupts */ 2443 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB); 2444 2445 return 0; 2446 2447 err: 2448 while (--i >= 0) { 2449 iounmap(mem_mapped_addr[i]); 2450 release_mem_region(pci_resource_start(pdev, i), 2451 pci_resource_len(pdev, i)); 2452 } 2453 2454 return ret; 2455 } 2456 2457 static int 2458 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2459 { 2460 struct net2272 *dev; 2461 int ret; 2462 2463 dev = net2272_probe_init(&pdev->dev, pdev->irq); 2464 if (IS_ERR(dev)) 2465 return PTR_ERR(dev); 2466 dev->dev_id = pdev->device; 2467 2468 if (pci_enable_device(pdev) < 0) { 2469 ret = -ENODEV; 2470 goto err_free; 2471 } 2472 2473 pci_set_master(pdev); 2474 2475 switch (pdev->device) { 2476 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break; 2477 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break; 2478 default: BUG(); 2479 } 2480 if (ret) 2481 goto err_pci; 2482 2483 ret = net2272_probe_fin(dev, 0); 2484 if (ret) 2485 goto err_pci; 2486 2487 pci_set_drvdata(pdev, dev); 2488 2489 return 0; 2490 2491 err_pci: 2492 pci_disable_device(pdev); 2493 err_free: 2494 kfree(dev); 2495 2496 return ret; 2497 } 2498 2499 static void 2500 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev) 2501 { 2502 int i; 2503 2504 /* disable PLX 9054 interrupts */ 2505 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) & 2506 ~(1 << PCI_INTERRUPT_ENABLE), 2507 dev->rdk1.plx9054_base_addr + INTCSR); 2508 2509 /* clean up resources allocated during probe() */ 2510 iounmap(dev->rdk1.plx9054_base_addr); 2511 iounmap(dev->rdk1.epld_base_addr); 2512 2513 for (i = 0; i < 4; ++i) { 2514 if (i == 1) 2515 continue; /* BAR1 unused */ 2516 release_mem_region(pci_resource_start(pdev, i), 2517 pci_resource_len(pdev, i)); 2518 } 2519 } 2520 2521 static void 2522 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev) 2523 { 2524 int i; 2525 2526 /* disable fpga interrupts 2527 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) & 2528 ~(1 << PCI_INTERRUPT_ENABLE), 2529 dev->rdk1.plx9054_base_addr + INTCSR); 2530 */ 2531 2532 /* 
clean up resources allocated during probe() */ 2533 iounmap(dev->rdk2.fpga_base_addr); 2534 2535 for (i = 0; i < 2; ++i) 2536 release_mem_region(pci_resource_start(pdev, i), 2537 pci_resource_len(pdev, i)); 2538 } 2539 2540 static void 2541 net2272_pci_remove(struct pci_dev *pdev) 2542 { 2543 struct net2272 *dev = pci_get_drvdata(pdev); 2544 2545 net2272_remove(dev); 2546 2547 switch (pdev->device) { 2548 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break; 2549 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break; 2550 default: BUG(); 2551 } 2552 2553 pci_disable_device(pdev); 2554 2555 kfree(dev); 2556 } 2557 2558 /* Table of matching PCI IDs */ 2559 static struct pci_device_id pci_ids[] = { 2560 { /* RDK 1 card */ 2561 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), 2562 .class_mask = 0, 2563 .vendor = PCI_VENDOR_ID_PLX, 2564 .device = PCI_DEVICE_ID_RDK1, 2565 .subvendor = PCI_ANY_ID, 2566 .subdevice = PCI_ANY_ID, 2567 }, 2568 { /* RDK 2 card */ 2569 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), 2570 .class_mask = 0, 2571 .vendor = PCI_VENDOR_ID_PLX, 2572 .device = PCI_DEVICE_ID_RDK2, 2573 .subvendor = PCI_ANY_ID, 2574 .subdevice = PCI_ANY_ID, 2575 }, 2576 { } 2577 }; 2578 MODULE_DEVICE_TABLE(pci, pci_ids); 2579 2580 static struct pci_driver net2272_pci_driver = { 2581 .name = driver_name, 2582 .id_table = pci_ids, 2583 2584 .probe = net2272_pci_probe, 2585 .remove = net2272_pci_remove, 2586 }; 2587 2588 static int net2272_pci_register(void) 2589 { 2590 return pci_register_driver(&net2272_pci_driver); 2591 } 2592 2593 static void net2272_pci_unregister(void) 2594 { 2595 pci_unregister_driver(&net2272_pci_driver); 2596 } 2597 2598 #else 2599 static inline int net2272_pci_register(void) { return 0; } 2600 static inline void net2272_pci_unregister(void) { } 2601 #endif 2602 2603 /*---------------------------------------------------------------------------*/ 2604 2605 static int 2606 net2272_plat_probe(struct platform_device *pdev) 2607 { 2608 struct net2272 *dev; 2609 int ret; 2610 unsigned int irqflags; 2611 resource_size_t base, len; 2612 struct resource *iomem, *iomem_bus, *irq_res; 2613 2614 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2615 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2616 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0); 2617 if (!irq_res || !iomem) { 2618 dev_err(&pdev->dev, "must provide irq/base addr\n"); 2619 return -EINVAL; 2620 } 2621 2622 dev = net2272_probe_init(&pdev->dev, irq_res->start); 2623 if (IS_ERR(dev)) 2624 return PTR_ERR(dev); 2625 2626 irqflags = 0; 2627 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) 2628 irqflags |= IRQF_TRIGGER_RISING; 2629 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) 2630 irqflags |= IRQF_TRIGGER_FALLING; 2631 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) 2632 irqflags |= IRQF_TRIGGER_HIGH; 2633 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) 2634 irqflags |= IRQF_TRIGGER_LOW; 2635 2636 base = iomem->start; 2637 len = resource_size(iomem); 2638 if (iomem_bus) 2639 dev->base_shift = iomem_bus->start; 2640 2641 if (!request_mem_region(base, len, driver_name)) { 2642 dev_dbg(dev->dev, "can't request memory region\n"); 2643 ret = -EBUSY; 2644 goto err; 2645 } 2646 dev->base_addr = ioremap_nocache(base, len); 2647 if (!dev->base_addr) { 2648 dev_dbg(dev->dev, "can't map memory\n"); 2649 ret = -EFAULT; 2650 goto err_req; 2651 } 2652 2653 ret = net2272_probe_fin(dev, irqflags); 2654 if (ret) 2655 goto err_io; 2656 2657 platform_set_drvdata(pdev, dev); 2658 
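/* The memory window, IRQ trigger type, and optional address-line shift all come from the platform resources parsed above; a sketch of a matching board-file platform_device declaration appears at the end of this file. */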
dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n", 2659 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no "); 2660 2661 return 0; 2662 2663 err_io: 2664 iounmap(dev->base_addr); 2665 err_req: 2666 release_mem_region(base, len); 2667 err: 2668 return ret; 2669 } 2670 2671 static int 2672 net2272_plat_remove(struct platform_device *pdev) 2673 { 2674 struct net2272 *dev = platform_get_drvdata(pdev); 2675 2676 net2272_remove(dev); 2677 2678 release_mem_region(pdev->resource[0].start, 2679 resource_size(&pdev->resource[0])); 2680 2681 kfree(dev); 2682 2683 return 0; 2684 } 2685 2686 static struct platform_driver net2272_plat_driver = { 2687 .probe = net2272_plat_probe, 2688 .remove = net2272_plat_remove, 2689 .driver = { 2690 .name = driver_name, 2691 }, 2692 /* FIXME .suspend, .resume */ 2693 }; 2694 MODULE_ALIAS("platform:net2272"); 2695 2696 static int __init net2272_init(void) 2697 { 2698 int ret; 2699 2700 ret = net2272_pci_register(); 2701 if (ret) 2702 return ret; 2703 ret = platform_driver_register(&net2272_plat_driver); 2704 if (ret) 2705 goto err_pci; 2706 return ret; 2707 2708 err_pci: 2709 net2272_pci_unregister(); 2710 return ret; 2711 } 2712 module_init(net2272_init); 2713 2714 static void __exit net2272_cleanup(void) 2715 { 2716 net2272_pci_unregister(); 2717 platform_driver_unregister(&net2272_plat_driver); 2718 } 2719 module_exit(net2272_cleanup); 2720 2721 MODULE_DESCRIPTION(DRIVER_DESC); 2722 MODULE_AUTHOR("PLX Technology, Inc."); 2723 MODULE_LICENSE("GPL"); 2724
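/*
 * Illustrative only: a minimal sketch of how a board file might register
 * the NET2272 as a platform device so net2272_plat_probe() above can bind
 * to it.  The base address, size, IRQ number, and address-line shift below
 * are hypothetical placeholders, not values from any real board; the
 * resource types (IORESOURCE_MEM, IORESOURCE_IRQ with a trigger flag, and
 * an optional IORESOURCE_BUS entry feeding dev->base_shift) are the ones
 * the probe routine actually looks for.
 */
#if 0
static struct resource example_net2272_resources[] = {
	[0] = {
		.start	= 0x20300000,			/* hypothetical chip-select base */
		.end	= 0x20300000 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 64,				/* hypothetical interrupt line */
		.end	= 64,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
	[2] = {
		.start	= 1,				/* optional: address-line shift */
		.end	= 1,
		.flags	= IORESOURCE_BUS,
	},
};

static struct platform_device example_net2272_device = {
	.name		= "net2272",		/* must match driver_name above */
	.id		= -1,
	.resource	= example_net2272_resources,
	.num_resources	= ARRAY_SIZE(example_net2272_resources),
};

/* board init code would then call platform_device_register(&example_net2272_device); */
#endif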