/*
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>

#include "../core/hcd.h"

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/*-------------------------------------------------------------------------*/

/*
 * EHCI hc_driver implementation ... experimental, incomplete.
 * Based on the final 1.0 register interface specification.
 *
 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
 * First was PCMCIA, like ISA; then CardBus, which is PCI.
 * Next comes "CardBay", using USB 2.0 signals.
 *
 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
 * Special thanks to Intel and VIA for providing host controllers to
 * test this driver on, and Cypress (including In-System Design) for
 * providing early devices for those host controllers to talk to!
 */

#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"

static const char	hcd_name [] = "ehci_hcd";


#undef VERBOSE_DEBUG
#undef EHCI_URB_TRACE

#ifdef DEBUG
#define EHCI_STATS
#endif

/* magic numbers that can affect system performance */
#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
#define	EHCI_TUNE_RL_TT		0
#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
#define	EHCI_TUNE_MULT_TT	1
#define	EHCI_TUNE_FLS		2	/* (small) 256 frame schedule */

#define EHCI_IAA_MSECS		10		/* arbitrary */
#define EHCI_IO_JIFFIES		(HZ/10)		/* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES	(HZ/20)		/* async idle timeout */
#define EHCI_SHRINK_FRAMES	5		/* async qh unlink delay */

/* Initial IRQ latency:  faster than hw default */
static int log2_irq_thresh = 0;		// 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* initial park setting:  slower than hw default */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");

/* for flakey hardware, ignore overcurrent indicators */
static int ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");

#define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)

/*-------------------------------------------------------------------------*/

#include "ehci.h"
#include "ehci-dbg.c"

/*-------------------------------------------------------------------------*/

static void
timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
{
	/* Don't override timeouts which shrink or (later) disable
	 * the async ring; just the I/O watchdog.  Note that if a
	 * SHRINK were pending, OFF would never be requested.
	 */
	if (timer_pending(&ehci->watchdog)
			&& ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
				& ehci->actions))
		return;

	if (!test_and_set_bit(action, &ehci->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IO_WATCHDOG:
			if (!ehci->need_io_watchdog)
				return;
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		/* case TIMER_ASYNC_SHRINK: */
		default:
			/* add a jiffie since we synch against the
			 * 8 KHz uframe counter.
			 */
			t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
			break;
		}
		mod_timer(&ehci->watchdog, t + jiffies);
	}
}

/*-------------------------------------------------------------------------*/

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown:  shutting down the bridge before the devices using it.
 */
static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
		      u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = ehci_readl(ehci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay (1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/* force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt (struct ehci_hcd *ehci)
{
	u32	temp = ehci_readl(ehci, &ehci->regs->status);

	/* disable any irqs left enabled by previous code */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = ehci_readl(ehci, &ehci->regs->command);
	temp &= ~CMD_RUN;
	ehci_writel(ehci, temp, &ehci->regs->command);
	return handshake (ehci, &ehci->regs->status,
			  STS_HALT, STS_HALT, 16 * 125);
}

static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
				       u32 mask, u32 done, int usec)
{
	int error;

	error = handshake(ehci, ptr, mask, done, usec);
	if (error) {
		ehci_halt(ehci);
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
			ptr, mask, done, error);
	}

	return error;
}

/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
	u32 __iomem	*reg_ptr;
	u32		tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
	tmp = ehci_readl(ehci, reg_ptr);
	tmp |= USBMODE_CM_HC;
	/* The default byte access to MMR space is LE after
	 * controller reset. Set the required endian mode
	 * for transfer buffers to match the host microprocessor
	 */
	if (ehci_big_endian_mmio(ehci))
		tmp |= USBMODE_BE;
	ehci_writel(ehci, tmp, reg_ptr);
}

/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset (struct ehci_hcd *ehci)
{
	int	retval;
	u32	command = ehci_readl(ehci, &ehci->regs->command);

	/* If the EHCI debug controller is active, special care must be
	 * taken before and after a host controller reset */
	if (ehci->debug && !dbgp_reset_prep())
		ehci->debug = NULL;

	command |= CMD_RESET;
	dbg_cmd (ehci, "reset", command);
	ehci_writel(ehci, command, &ehci->regs->command);
	ehci_to_hcd(ehci)->state = HC_STATE_HALT;
	ehci->next_statechange = jiffies;
	retval = handshake (ehci, &ehci->regs->command,
			    CMD_RESET, 0, 250 * 1000);

	if (ehci->has_hostpc) {
		ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
			(u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
		ehci_writel(ehci, TXFIFO_DEFAULT,
			(u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
	}
	if (retval)
		return retval;

	if (ehci_is_TDI(ehci))
		tdi_reset (ehci);

	if (ehci->debug)
		dbgp_external_startup();

	return retval;
}

/* idle the controller (from running) */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
	u32	temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		BUG ();
#endif

	/* wait for any schedule enables/disables to take effect
	 * (the left shift lines up CMD_ASE/CMD_PSE in the command
	 * register with STS_ASS/STS_PSS in the status register)
	 */
	temp = ehci_readl(ehci, &ehci->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
					STS_ASS | STS_PSS, temp, 16 * 125))
		return;

	/* then disable anything that's still active */
	temp = ehci_readl(ehci, &ehci->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	ehci_writel(ehci, temp, &ehci->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	handshake_on_error_set_halt(ehci, &ehci->regs->status,
				    STS_ASS | STS_PSS, 0, 16 * 125);
}

/*-------------------------------------------------------------------------*/

static void end_unlink_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);

#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"

/*-------------------------------------------------------------------------*/

static void ehci_iaa_watchdog(unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);

	/* Lost IAA irqs wedge things badly; seen first with a vt8235.
	 * So we need this watchdog, but must protect it against both
	 * (a) SMP races against real IAA firing and retriggering, and
	 * (b) clean HC shutdown, when IAA watchdog was pending.
	 */
	if (ehci->reclaim
			&& !timer_pending(&ehci->iaa_watchdog)
			&& HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
		u32 cmd, status;

		/* If we get here, IAA is *REALLY* late.  It's barely
		 * conceivable that the system is so busy that CMD_IAAD
		 * is still legitimately set, so let's be sure it's
		 * clear before we read STS_IAA.  (The HC should clear
		 * CMD_IAAD when it sets STS_IAA.)
		 */
		cmd = ehci_readl(ehci, &ehci->regs->command);
		if (cmd & CMD_IAAD)
			ehci_writel(ehci, cmd & ~CMD_IAAD,
					&ehci->regs->command);

		/* If IAA is set here it either legitimately triggered
		 * before we cleared IAAD above (but _way_ late, so we'll
		 * still count it as lost) ... or a silicon erratum:
		 * - VIA seems to set IAA without triggering the IRQ;
		 * - IAAD potentially cleared without setting IAA.
		 */
		status = ehci_readl(ehci, &ehci->regs->status);
		if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
			COUNT (ehci->stats.lost_iaa);
			ehci_writel(ehci, STS_IAA, &ehci->regs->status);
		}

		ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
				status, cmd);
		end_unlink_async(ehci);
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_watchdog(unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);

	/* stop async processing after it's idled a bit */
	if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
		start_unlink_async (ehci, ehci->async);

	/* ehci could run by timer, without IRQs ... */
	ehci_work (ehci);

	spin_unlock_irqrestore (&ehci->lock, flags);
}

/* On some systems, leaving remote wakeup enabled prevents system shutdown.
 * The firmware seems to think that powering off is a wakeup event!
 * This routine turns off remote wakeup and everything else, on all ports.
 */
static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
{
	int	port = HCS_N_PORTS(ehci->hcs_params);

	while (port--)
		ehci_writel(ehci, PORT_RWC_BITS,
				&ehci->regs->port_status[port]);
}

/*
 * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
 * Should be called with ehci->lock held.
 */
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
	ehci_halt(ehci);
	ehci_turn_off_all_ports(ehci);

	/* make BIOS/etc use companion controller during reboot */
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);

	/* unblock posted writes */
	ehci_readl(ehci, &ehci->regs->configured_flag);
}

/* ehci_shutdown kicks in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void ehci_shutdown(struct usb_hcd *hcd)
{
	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);

	del_timer_sync(&ehci->watchdog);
	del_timer_sync(&ehci->iaa_watchdog);

	spin_lock_irq(&ehci->lock);
	ehci_silence_controller(ehci);
	spin_unlock_irq(&ehci->lock);
}

static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
{
	unsigned port;

	if (!HCS_PPC (ehci->hcs_params))
		return;

	ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
		(void) ehci_hub_control(ehci_to_hcd(ehci),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
	/* Flush those writes */
	ehci_readl(ehci, &ehci->regs->command);
	msleep(20);
}

/*-------------------------------------------------------------------------*/

/*
 * ehci_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping ehci->lock.
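 * (re-entrant schedule scans while those completions run are blocked
 * by the ehci->scanning flag below.)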
 */
static void ehci_work (struct ehci_hcd *ehci)
{
	timer_action_done (ehci, TIMER_IO_WATCHDOG);

	/* another CPU may drop ehci->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (ehci->scanning)
		return;
	ehci->scanning = 1;
	scan_async (ehci);
	if (ehci->next_uframe != -1)
		scan_periodic (ehci);
	ehci->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
			(ehci->async->qh_next.ptr != NULL ||
			 ehci->periodic_sched != 0))
		timer_action (ehci, TIMER_IO_WATCHDOG);
}

/*
 * Called when the ehci_hcd module is removed.
 */
static void ehci_stop (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);

	ehci_dbg (ehci, "stop\n");

	/* no more interrupts ... */
	del_timer_sync (&ehci->watchdog);
	del_timer_sync(&ehci->iaa_watchdog);

	spin_lock_irq(&ehci->lock);
	if (HC_IS_RUNNING (hcd->state))
		ehci_quiesce (ehci);

	ehci_silence_controller(ehci);
	ehci_reset (ehci);
	spin_unlock_irq(&ehci->lock);

	remove_companion_file(ehci);
	remove_debug_files (ehci);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq (&ehci->lock);
	if (ehci->async)
		ehci_work (ehci);
	spin_unlock_irq (&ehci->lock);
	ehci_mem_cleanup (ehci);

#ifdef	EHCI_STATS
	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
		ehci->stats.lost_iaa);
	ehci_dbg (ehci, "complete %ld unlink %ld\n",
		ehci->stats.complete, ehci->stats.unlink);
#endif

	dbg_status (ehci, "ehci_stop completed",
		    ehci_readl(ehci, &ehci->regs->status));
}

/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	u32			temp;
	int			retval;
	u32			hcc_params;
	struct ehci_qh_hw	*hw;

	spin_lock_init(&ehci->lock);

	/*
	 * keep the I/O watchdog enabled by default; well-behaved HCDs
	 * can turn it off later
	 */
	ehci->need_io_watchdog = 1;
	init_timer(&ehci->watchdog);
	ehci->watchdog.function = ehci_watchdog;
	ehci->watchdog.data = (unsigned long) ehci;

	init_timer(&ehci->iaa_watchdog);
	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
	ehci->iaa_watchdog.data = (unsigned long) ehci;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	INIT_LIST_HEAD(&ehci->cached_itd_list);
	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
		ehci->i_thresh = 2 + 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	ehci->reclaim = NULL;
	ehci->next_uframe = -1;
	ehci->clock_frame = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	ehci->async->qh_next.qh = NULL;
	hw = ehci->async->hw;
	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
	hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	hw->hw_qtd_next = EHCI_LIST_END(ehci);
	ehci->async->qh_state = QH_STATE_LINKED;
	hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * cause problems:  throughput reduction (!), data errors ...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_dbg(ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0: ehci->periodic_size = 1024; break;
		case 1: ehci->periodic_size = 512; break;
		case 2: ehci->periodic_size = 256; break;
		default:	BUG();
		}
	}
	ehci->command = temp;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;
	return 0;
}

/* start HC running; it's halted, ehci_init() has been run (once) */
static int ehci_run (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	int			retval;
	u32			temp;
	u32			hcc_params;

	hcd->uses_new_polling = 1;
	hcd->poll_rh = 0;

	/* EHCI spec section 4.1 */
	if ((retval = ehci_reset(ehci)) != 0) {
		ehci_mem_cleanup(ehci);
		return retval;
	}
	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);

	/*
	 * hcc_params controls whether ehci->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
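 * (the 64bit DMA mask itself is left alone below; see the #if 0 block.)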
	 */
	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params)) {
		ehci_writel(ehci, 0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
		if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
			ehci_info(ehci, "enabled 64bit DMA\n");
#endif
	}


	// Philips, Intel, and maybe others need CMD_RUN before the
	// root hub will detect new devices (why?); NEC doesn't
	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
	ehci->command |= CMD_RUN;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	dbg_cmd (ehci, "init", ehci->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 *
	 * Turning on the CF flag will transfer ownership of all ports
	 * from the companions to the EHCI controller.  If any of the
	 * companions are in the middle of a port reset at the time, it
	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
	 * guarantees that no resets are in progress.  After we set CF,
	 * a short delay lets the hardware catch up; new resets shouldn't
	 * be started before the port switching actions could complete.
	 */
	down_write(&ehci_cf_port_reset_rwsem);
	hcd->state = HC_STATE_RUNNING;
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
	msleep(5);
	up_write(&ehci_cf_port_reset_rwsem);
	ehci->last_periodic_enable = ktime_get_real();

	temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
	ehci_info (ehci,
		"USB %x.%x started, EHCI %x.%02x%s\n",
		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
		temp >> 8, temp & 0xff,
		ignore_oc ? ", overcurrent ignored" : "");

	ehci_writel(ehci, INTR_MASK,
		    &ehci->regs->intr_enable); /* Turn On Interrupts */

	/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it in
	 * init() since the class device isn't created that early.
	 */
	create_debug_files(ehci);
	create_companion_file(ehci);

	return 0;
}

/*-------------------------------------------------------------------------*/

static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			status, masked_status, pcd_status = 0, cmd;
	int			bh;

	spin_lock (&ehci->lock);

	status = ehci_readl(ehci, &ehci->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		ehci_dbg (ehci, "device removed\n");
		goto dead;
	}

	masked_status = status & INTR_MASK;
	if (!masked_status) {		/* irq sharing? */
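		/* not ours: none of the enabled status bits are set */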
		spin_unlock(&ehci->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	ehci_writel(ehci, masked_status, &ehci->regs->status);
	cmd = ehci_readl(ehci, &ehci->regs->command);
	bh = 0;

#ifdef	VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status (ehci, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely ((status & STS_ERR) == 0))
			COUNT (ehci->stats.normal);
		else
			COUNT (ehci->stats.error);
		bh = 1;
	}

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		/* guard against (alleged) silicon errata */
		if (cmd & CMD_IAAD) {
			ehci_writel(ehci, cmd & ~CMD_IAAD,
					&ehci->regs->command);
			ehci_dbg(ehci, "IAA with IAAD still set?\n");
		}
		if (ehci->reclaim) {
			COUNT(ehci->stats.reclaim);
			end_unlink_async(ehci);
		} else
			ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned	i = HCS_N_PORTS (ehci->hcs_params);

		/* kick root hub later */
		pcd_status = status;

		/* resume root hub? */
		if (!(cmd & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = ehci_readl(ehci,
						 &ehci->regs->port_status [i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(test_bit(i, &ehci->suspended_ports) &&
					((pstatus & PORT_RESUME) ||
						!(pstatus & PORT_SUSPEND)) &&
					(pstatus & PORT_PE) &&
					ehci->reset_done[i] == 0))
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely ((status & STS_FATAL) != 0)) {
		ehci_err(ehci, "fatal error\n");
		dbg_cmd(ehci, "fatal", cmd);
		dbg_status(ehci, "fatal", status);
		ehci_halt(ehci);
dead:
		ehci_reset(ehci);
		ehci_writel(ehci, 0, &ehci->regs->configured_flag);
		/* generic layer kills/unlinks all urbs, then
		 * uses ehci_stop to clean up the rest
		 */
		bh = 1;
	}

	if (bh)
		ehci_work (ehci);
	spin_unlock (&ehci->lock);
	if (pcd_status)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}

/*-------------------------------------------------------------------------*/

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE:  control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int ehci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct urb	*urb,
	gfp_t		mem_flags
) {
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_CONTROL:
		/* qh_completions() code doesn't handle all the fault cases
		 * in multi-TD control transfers.  Even 1KB is rare anyway.
		 */
		if (urb->transfer_buffer_length > (16 * 1024))
			return -EMSGSIZE;
		/* FALLTHROUGH */
	/* case PIPE_BULK: */
	default:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(ehci, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(ehci, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit (ehci, urb, mem_flags);
		else
			return sitd_submit (ehci, urb, mem_flags);
	}
}

static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* failfast */
	if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
		end_unlink_async(ehci);

	/* If the QH isn't linked then there's nothing we can do
	 * unless we were called during a giveback, in which case
	 * qh_completions() has to deal with it.
	 */
	if (qh->qh_state != QH_STATE_LINKED) {
		if (qh->qh_state == QH_STATE_COMPLETING)
			qh->needs_rescan = 1;
		return;
	}

	/* defer till later if busy */
	if (ehci->reclaim) {
		struct ehci_qh		*last;

		for (last = ehci->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* start IAA cycle */
	} else
		start_unlink_async (ehci, qh);
}

/* remove from hardware lists
 * completions normally happen asynchronously
 */

static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct ehci_qh		*qh;
	unsigned long		flags;
	int			rc;

	spin_lock_irqsave (&ehci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			unlink_async(ehci, qh);
			break;
		case QH_STATE_UNLINK:
		case QH_STATE_UNLINK_WAIT:
			/* already started */
			break;
		case QH_STATE_IDLE:
			/* QH might be waiting for a Clear-TT-Buffer */
			qh_completions(ehci, qh);
			break;
		}
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			intr_deschedule (ehci, qh);
			break;
		case QH_STATE_IDLE:
			qh_completions (ehci, qh);
			break;
		default:
			ehci_dbg (ehci, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}
		break;

	case PIPE_ISOCHRONOUS:
		// itd or sitd ...

		// wait till next completion, do it then.
		// completion irqs can wait up to 1024 msec,
		break;
	}
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return rc;
}

/*-------------------------------------------------------------------------*/

// bulk qh holds the data toggle

static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	unsigned long		flags;
	struct ehci_qh		*qh, *tmp;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave (&ehci->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw->hw_info1 == 0) {
		ehci_vdbg (ehci, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING (hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
	case QH_STATE_COMPLETING:
		for (tmp = ehci->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async (ehci, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
	case QH_STATE_UNLINK_WAIT:
idle_timeout:
		spin_unlock_irqrestore (&ehci->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (qh->clearing_tt)
			goto idle_timeout;
		if (list_empty (&qh->qtd_list)) {
			qh_put (qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty (&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return;
}

static void
ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh;
	int			eptype = usb_endpoint_type(&ep->desc);
	int			epnum = usb_endpoint_num(&ep->desc);
	int			is_out = usb_endpoint_dir_out(&ep->desc);
	unsigned long		flags;

	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
		return;

	spin_lock_irqsave(&ehci->lock, flags);
	qh = ep->hcpriv;

	/* For Bulk and Interrupt endpoints we maintain the toggle state
	 * in the hardware; the toggle bits in udev aren't used at all.
	 * When an endpoint is reset by usb_clear_halt() we must reset
	 * the toggle bit in the QH.
	 */
	if (qh) {
		usb_settoggle(qh->dev, epnum, is_out, 0);
		if (!list_empty(&qh->qtd_list)) {
			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
		} else if (qh->qh_state == QH_STATE_LINKED ||
				qh->qh_state == QH_STATE_COMPLETING) {

			/* The toggle value in the QH can't be updated
			 * while the QH is active.  Unlink it now;
			 * re-linking will call qh_refresh().
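			 * That refresh picks up the toggle cleared above.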
			 */
			if (eptype == USB_ENDPOINT_XFER_BULK)
				unlink_async(ehci, qh);
			else
				intr_deschedule(ehci, qh);
		}
	}
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static int ehci_get_frame (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
		ehci->periodic_size;
}

/*-------------------------------------------------------------------------*/

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");

#ifdef CONFIG_PCI
#include "ehci-pci.c"
#define	PCI_DRIVER		ehci_pci_driver
#endif

#ifdef CONFIG_USB_EHCI_FSL
#include "ehci-fsl.c"
#define	PLATFORM_DRIVER		ehci_fsl_driver
#endif

#ifdef CONFIG_USB_EHCI_MXC
#include "ehci-mxc.c"
#define PLATFORM_DRIVER		ehci_mxc_driver
#endif

#ifdef CONFIG_SOC_AU1200
#include "ehci-au1xxx.c"
#define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
#endif

#ifdef CONFIG_ARCH_OMAP34XX
#include "ehci-omap.c"
#define	PLATFORM_DRIVER		ehci_hcd_omap_driver
#endif

#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
#define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
#endif

#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
#include "ehci-ppc-of.c"
#define OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
#endif

#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
#define OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
#endif

#ifdef CONFIG_PLAT_ORION
#include "ehci-orion.c"
#define	PLATFORM_DRIVER		ehci_orion_driver
#endif

#ifdef CONFIG_ARCH_IXP4XX
#include "ehci-ixp4xx.c"
#define	PLATFORM_DRIVER		ixp4xx_ehci_driver
#endif

#ifdef CONFIG_USB_W90X900_EHCI
#include "ehci-w90x900.c"
#define	PLATFORM_DRIVER		ehci_hcd_w90x900_driver
#endif

#ifdef CONFIG_ARCH_AT91
#include "ehci-atmel.c"
#define	PLATFORM_DRIVER		ehci_atmel_driver
#endif

#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
    !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER)
#error "missing bus glue for ehci-hcd"
#endif

static int __init ehci_hcd_init(void)
{
	int retval = 0;

	if (usb_disabled())
		return -ENODEV;

	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
				" before uhci_hcd and ohci_hcd, not after\n");

	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
		 hcd_name,
		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));

#ifdef DEBUG
	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
	if (!ehci_debug_root) {
		retval = -ENOENT;
		goto err_debug;
	}
#endif

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		goto clean0;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0)
		goto clean1;
#endif

#ifdef PS3_SYSTEM_BUS_DRIVER
	retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
	if (retval < 0)
		goto clean2;
#endif

#ifdef OF_PLATFORM_DRIVER
	retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
	if (retval < 0)
		goto clean3;
#endif
	return retval;

#ifdef OF_PLATFORM_DRIVER
	/* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
clean2:
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
clean1:
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
#ifdef DEBUG
	debugfs_remove(ehci_debug_root);
	ehci_debug_root = NULL;
err_debug:
#endif
	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	return retval;
}
module_init(ehci_hcd_init);

static void __exit ehci_hcd_cleanup(void)
{
#ifdef OF_PLATFORM_DRIVER
	of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
#ifdef DEBUG
	debugfs_remove(ehci_debug_root);
#endif
	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ehci_hcd_cleanup);