/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 * (and others)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"


/* UHCI: PCI-config legacy-support register and I/O-space register offsets */
#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

/* OHCI: memory-mapped operational register offsets and bits */
#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

/* EHCI: capability/operational register offsets and legacy-support bits */
#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define AB_REG_BAR_LOW		0xe0
#define AB_REG_BAR_HIGH		0xe1
#define AB_REG_BAR_SB700	0xf0
#define AB_INDX(addr)		((addr) + 0x00)
#define AB_DATA(addr)		((addr) + 0x04)
#define AX_INDXC		0x30
#define AX_DATAC		0x34

#define NB_PCIE_INDX_ADDR	0xe0
#define NB_PCIE_INDX_DATA	0xe4
#define PCIE_P_CNTL		0x10040
#define BIF_NB			0x10002
#define NB_PIF0_PWRDOWN_0	0x01100012
#define NB_PIF0_PWRDOWN_1	0x01100013

/* Intel Panther Point port-switchover PCI config registers */
#define USB_INTEL_XUSB2PR	0xD0
#define USB_INTEL_USB3_PSSEN	0xD8

/*
 * Cached result of the AMD chipset probe below, shared by the PLL quirk
 * functions.  Protected by amd_lock.  probe_count counts how many callers
 * hold a reference (paired with usb_amd_dev_put); isoc_reqs counts nested
 * quirk_pll_disable requests.
 */
static struct amd_chipset_info {
	struct pci_dev	*nb_dev;	/* northbridge, if one was matched */
	struct pci_dev	*smbus_dev;	/* southbridge SMBus device */
	int nb_type;			/* 0 = none; 1/2/3 per device ID below */
	int sb_type;			/* 0 = none; 1/2/3 per SMBus revision */
	int isoc_reqs;			/* outstanding PLL-disable requests */
	int probe_count;		/* references held on this info */
	int probe_result;		/* nonzero if the PLL quirk applies */
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);

/*
 * Probe (once) for the AMD north/southbridge combinations that need the
 * A-link PLL quirk.  Classifies the southbridge by SMBus device ID and
 * revision, then looks for a matching northbridge.  The result is cached
 * in amd_chipset and reference-counted; each successful call must be
 * balanced by usb_amd_dev_put().
 *
 * Returns 1 if the quirk is needed on this platform, 0 otherwise.
 */
int usb_amd_find_chipset_info(void)
{
	u8 rev = 0;
	unsigned long flags;
	struct amd_chipset_info	info;
	int ret;

	spin_lock_irqsave(&amd_lock, flags);

	/* probe only once */
	if (amd_chipset.probe_count > 0) {
		amd_chipset.probe_count++;
		spin_unlock_irqrestore(&amd_lock, flags);
		return amd_chipset.probe_result;
	}
	memset(&info, 0, sizeof(info));
	/* Probe outside the lock; pci_get_device may sleep. */
	spin_unlock_irqrestore(&amd_lock, flags);

	/* ATI SBx00 SMBus controller? */
	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
	if (info.smbus_dev) {
		rev = info.smbus_dev->revision;
		if (rev >= 0x40)
			info.sb_type = 1;
		else if (rev >= 0x30 && rev <= 0x3b)
			info.sb_type = 3;
	} else {
		/* AMD Hudson-2 SMBus controller? */
		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						0x780b, NULL);
		if (!info.smbus_dev) {
			ret = 0;
			goto commit;
		}

		rev = info.smbus_dev->revision;
		if (rev >= 0x11 && rev <= 0x18)
			info.sb_type = 2;
	}

	/* Unrecognized revision: no quirk needed, drop the reference. */
	if (info.sb_type == 0) {
		if (info.smbus_dev) {
			pci_dev_put(info.smbus_dev);
			info.smbus_dev = NULL;
		}
		ret = 0;
		goto commit;
	}

	/* Find the matching northbridge, trying each known device ID. */
	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (info.nb_dev) {
		info.nb_type = 1;
	} else {
		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
		if (info.nb_dev) {
			info.nb_type = 2;
		} else {
			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						     0x9600, NULL);
			if (info.nb_dev)
				info.nb_type = 3;
		}
	}

	ret = info.probe_result = 1;
	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:

	spin_lock_irqsave(&amd_lock, flags);
	if (amd_chipset.probe_count > 0) {
		/* race - someone else was faster - drop devices */

		/* Mark that we were here */
		amd_chipset.probe_count++;
		ret = amd_chipset.probe_result;

		spin_unlock_irqrestore(&amd_lock, flags);

		/* drop our duplicate references outside the lock */
		if (info.nb_dev)
			pci_dev_put(info.nb_dev);
		if (info.smbus_dev)
			pci_dev_put(info.smbus_dev);

	} else {
		/* no race - commit the result */
		info.probe_count++;
		amd_chipset = info;
		spin_unlock_irqrestore(&amd_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link going into that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
 * some AMD platforms may stutter or have breaks occasionally.
 */
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ?
0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	/* Reference-count the requests: only the first disable and the
	 * last matching enable actually touch the hardware. */
	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
		/* Read the A-link express register base via the PM
		 * index/data pair at I/O ports 0xcd6/0xcd7. */
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		/* Select and read the register through the AB index/data
		 * pair at the base we just found. */
		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type == 3) {
		/* SB700: register base comes from PCI config space. */
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		/* unknown southbridge - nothing to do */
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	/* The northbridge half of the quirk is optional. */
	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		/* Indirect access to PCIE_P_CNTL through the NB
		 * index/data pair in PCI config space. */
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		/* Two PIF0 power-down registers, same 6-bit field each. */
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

/* Keep the A-link PLL up during isochronous transfers (refcounted). */
void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

/* Allow the A-link PLL to power down again (refcounted). */
void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);

/*
 * Drop one reference taken by usb_amd_find_chipset_info().  The last
 * reference releases the cached PCI devices and resets the cached state.
 */
void usb_amd_dev_put(void)
{
	struct pci_dev *nb, *smbus;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	/* save them to pci_dev_put outside of spinlock */
	nb = amd_chipset.nb_dev;
	smbus = amd_chipset.smbus_dev;

	amd_chipset.nb_dev = NULL;
	amd_chipset.smbus_dev = NULL;
	amd_chipset.nb_type = 0;
	amd_chipset.sb_type = 0;
	amd_chipset.isoc_reqs = 0;
	amd_chipset.probe_result = 0;

	spin_unlock_irqrestore(&amd_lock, flags);

	if (nb)
		pci_dev_put(nb);
	if (smbus)
		pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is
completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	/* HCRESET is supposed to self-clear when the reset completes. */
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

/* Nonzero if the given decode type (I/O or memory) is enabled in the
 * device's PCI command register and the config read succeeded. */
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev)	io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev)	io_type_enabled(dev, PCI_COMMAND_MEMORY)

/* Reset a UHCI controller found at boot, using its first I/O BAR. */
static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	/* UHCI registers live in I/O space; find the first I/O BAR. */
	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

/* Nonzero if BAR idx is assigned and memory decode is enabled. */
static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

/* Take an OHCI controller away from the BIOS/SMM firmware and quiesce it. */
static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	control = readl(base +
OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there.
 * The whole BIOS-handoff block below is compiled out on hppa, and IR
 * is additionally preserved in the reset mask. */
#ifdef __hppa__
#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 5 seconds */
		/* Request an ownership change and wait for the SMM driver
		 * to clear the interrupt-routing bit. */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
					" (BIOS bug?) %08x\n",
					readl(base + OHCI_CONTROL));
	}
#endif

	/* reset controller, preserving RWC (and possibly IR) */
	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);

	/*
	 * disable interrupts
	 */
	writel(~(u32)0, base + OHCI_INTRDISABLE);
	writel(~(u32)0, base + OHCI_INTRSTATUS);

	iounmap(base);
}

/*
 * Claim an EHCI controller from the BIOS via the USBLEGSUP semaphores in
 * PCI config space at @offset: set the OS semaphore, wait up to a second
 * for the BIOS semaphore to drop, then force it clear if the BIOS never
 * lets go.  @cap is the current USBLEGSUP value.
 */
static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
					void __iomem *op_reg_base,
					u32 cap, u8 offset)
{
	int try_handoff = 1, tried_handoff = 0;

	/* The Pegatron Lucid (ExoPC) tablet sporadically waits for 90
	 * seconds trying the handoff on its unused controller.  Skip
	 * it. */
	if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
		const char *dmi_bn = dmi_get_system_info(DMI_BOARD_NAME);
		const char *dmi_bv = dmi_get_system_info(DMI_BIOS_VERSION);
		if (dmi_bn && !strcmp(dmi_bn, "EXOPG06411") &&
				dmi_bv && !strcmp(dmi_bv, "Lucid-CE-133"))
			try_handoff = 0;
	}

	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
		/* BIOS workaround (?): be sure the pre-Linux code
		 * receives the SMI
		 */
		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
				val | EHCI_USBLEGCTLSTS_SOOE);
#endif

		/* some systems get upset if this semaphore is
		 * set for any other reason than forcing a BIOS
		 * handoff..
		 */
		/* offset + 3 is the byte holding the OS semaphore bit */
		pci_write_config_byte(pdev, offset + 3, 1);
	}

	/* if boot firmware now owns EHCI, spin till it hands it over. */
	if (try_handoff) {
		int msec = 1000;
		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
			tried_handoff = 1;
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, offset, &cap);
		}
	}

	if (cap & EHCI_USBLEGSUP_BIOS) {
		/* well, possibly buggy BIOS... try to shut it down,
		 * and hope nothing goes too wrong
		 */
		if (try_handoff)
			dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
					" (BIOS bug?) %08x\n", cap);
		/* offset + 2 is the byte holding the BIOS semaphore bit */
		pci_write_config_byte(pdev, offset + 2, 0);
	}

	/* just in case, always disable EHCI SMIs */
	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

	/* If the BIOS ever owned the controller then we can't expect
	 * any power sessions to remain intact.
	 */
	if (tried_handoff)
		writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

/* Take an EHCI controller away from the BIOS, then halt it and disable
 * its interrupts so the real driver starts from a clean state. */
static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	void __iomem *base, *op_reg_base;
	u32 hcc_params, cap, val;
	u8 offset, cap_length;
	int wait_time, delta, count = 256/4;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities"
	 * spec section 5.1 explains the bios handoff, e.g.
for 601 * booting from USB disk or using a usb keyboard 602 */ 603 hcc_params = readl(base + EHCI_HCC_PARAMS); 604 offset = (hcc_params >> 8) & 0xff; 605 while (offset && --count) { 606 pci_read_config_dword(pdev, offset, &cap); 607 608 switch (cap & 0xff) { 609 case 1: 610 ehci_bios_handoff(pdev, op_reg_base, cap, offset); 611 break; 612 case 0: /* Illegal reserved cap, set cap=0 so we exit */ 613 cap = 0; /* then fallthrough... */ 614 default: 615 dev_warn(&pdev->dev, "EHCI: unrecognized capability " 616 "%02x\n", cap & 0xff); 617 } 618 offset = (cap >> 8) & 0xff; 619 } 620 if (!count) 621 dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n"); 622 623 /* 624 * halt EHCI & disable its interrupts in any case 625 */ 626 val = readl(op_reg_base + EHCI_USBSTS); 627 if ((val & EHCI_USBSTS_HALTED) == 0) { 628 val = readl(op_reg_base + EHCI_USBCMD); 629 val &= ~EHCI_USBCMD_RUN; 630 writel(val, op_reg_base + EHCI_USBCMD); 631 632 wait_time = 2000; 633 delta = 100; 634 do { 635 writel(0x3f, op_reg_base + EHCI_USBSTS); 636 udelay(delta); 637 wait_time -= delta; 638 val = readl(op_reg_base + EHCI_USBSTS); 639 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { 640 break; 641 } 642 } while (wait_time > 0); 643 } 644 writel(0, op_reg_base + EHCI_USBINTR); 645 writel(0x3f, op_reg_base + EHCI_USBSTS); 646 647 iounmap(base); 648 } 649 650 /* 651 * handshake - spin reading a register until handshake completes 652 * @ptr: address of hc register to be read 653 * @mask: bits to look at in result of read 654 * @done: value of those bits when handshake succeeds 655 * @wait_usec: timeout in microseconds 656 * @delay_usec: delay in microseconds to wait between polling 657 * 658 * Polls a register every delay_usec microseconds. 659 * Returns 0 when the mask bits have the value done. 660 * Returns -ETIMEDOUT if this condition is not true after 661 * wait_usec microseconds have passed. 
662 */ 663 static int handshake(void __iomem *ptr, u32 mask, u32 done, 664 int wait_usec, int delay_usec) 665 { 666 u32 result; 667 668 do { 669 result = readl(ptr); 670 result &= mask; 671 if (result == done) 672 return 0; 673 udelay(delay_usec); 674 wait_usec -= delay_usec; 675 } while (wait_usec > 0); 676 return -ETIMEDOUT; 677 } 678 679 bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) 680 { 681 return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && 682 pdev->vendor == PCI_VENDOR_ID_INTEL && 683 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI; 684 } 685 EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci); 686 687 /* 688 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that 689 * share some number of ports. These ports can be switched between either 690 * controller. Not all of the ports under the EHCI host controller may be 691 * switchable. 692 * 693 * The ports should be switched over to xHCI before PCI probes for any device 694 * start. This avoids active devices under EHCI being disconnected during the 695 * port switchover, which could cause loss of data on USB storage devices, or 696 * failed boot when the root file system is on a USB mass storage device and is 697 * enumerated under EHCI first. 698 * 699 * We write into the xHC's PCI configuration space in some Intel-specific 700 * registers to switch the ports over. The USB 3.0 terminations and the USB 701 * 2.0 data wires are switched separately. We want to enable the SuperSpeed 702 * terminations before switching the USB 2.0 wires over, so that USB 3.0 703 * devices connect at SuperSpeed, rather than at USB 2.0 speeds. 704 */ 705 void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) 706 { 707 u32 ports_available; 708 709 ports_available = 0xffffffff; 710 /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable 711 * Register, to turn on SuperSpeed terminations for all 712 * available ports. 
713 */ 714 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 715 cpu_to_le32(ports_available)); 716 717 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 718 &ports_available); 719 dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled " 720 "under xHCI: 0x%x\n", ports_available); 721 722 ports_available = 0xffffffff; 723 /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to 724 * switch the USB 2.0 power and data lines over to the xHCI 725 * host. 726 */ 727 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 728 cpu_to_le32(ports_available)); 729 730 pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 731 &ports_available); 732 dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over " 733 "to xHCI: 0x%x\n", ports_available); 734 } 735 EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); 736 737 /** 738 * PCI Quirks for xHCI. 739 * 740 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. 741 * It signals to the BIOS that the OS wants control of the host controller, 742 * and then waits 5 seconds for the BIOS to hand over control. 743 * If we timeout, assume the BIOS is broken and take control anyway. 744 */ 745 static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) 746 { 747 void __iomem *base; 748 int ext_cap_offset; 749 void __iomem *op_reg_base; 750 u32 val; 751 int timeout; 752 753 if (!mmio_resource_enabled(pdev, 0)) 754 return; 755 756 base = ioremap_nocache(pci_resource_start(pdev, 0), 757 pci_resource_len(pdev, 0)); 758 if (base == NULL) 759 return; 760 761 /* 762 * Find the Legacy Support Capability register - 763 * this is optional for xHCI host controllers. 
764 */ 765 ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET); 766 do { 767 if (!ext_cap_offset) 768 /* We've reached the end of the extended capabilities */ 769 goto hc_init; 770 val = readl(base + ext_cap_offset); 771 if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY) 772 break; 773 ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset); 774 } while (1); 775 776 /* If the BIOS owns the HC, signal that the OS wants it, and wait */ 777 if (val & XHCI_HC_BIOS_OWNED) { 778 writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset); 779 780 /* Wait for 5 seconds with 10 microsecond polling interval */ 781 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 782 0, 5000, 10); 783 784 /* Assume a buggy BIOS and take HC ownership anyway */ 785 if (timeout) { 786 dev_warn(&pdev->dev, "xHCI BIOS handoff failed" 787 " (BIOS bug ?) %08x\n", val); 788 writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); 789 } 790 } 791 792 /* Disable any BIOS SMIs */ 793 writel(XHCI_LEGACY_DISABLE_SMI, 794 base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); 795 796 if (usb_is_intel_switchable_xhci(pdev)) 797 usb_enable_xhci_ports(pdev); 798 hc_init: 799 op_reg_base = base + XHCI_HC_LENGTH(readl(base)); 800 801 /* Wait for the host controller to be ready before writing any 802 * operational or runtime registers. Wait 5 seconds and no more. 803 */ 804 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, 805 5000, 10); 806 /* Assume a buggy HC and start HC initialization anyway */ 807 if (timeout) { 808 val = readl(op_reg_base + XHCI_STS_OFFSET); 809 dev_warn(&pdev->dev, 810 "xHCI HW not ready after 5 sec (HC bug?) " 811 "status = 0x%x\n", val); 812 } 813 814 /* Send the halt and disable interrupts command */ 815 val = readl(op_reg_base + XHCI_CMD_OFFSET); 816 val &= ~(XHCI_CMD_RUN | XHCI_IRQS); 817 writel(val, op_reg_base + XHCI_CMD_OFFSET); 818 819 /* Wait for the HC to halt - poll every 125 usec (one microframe). 
*/ 820 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, 821 XHCI_MAX_HALT_USEC, 125); 822 if (timeout) { 823 val = readl(op_reg_base + XHCI_STS_OFFSET); 824 dev_warn(&pdev->dev, 825 "xHCI HW did not halt within %d usec " 826 "status = 0x%x\n", XHCI_MAX_HALT_USEC, val); 827 } 828 829 iounmap(base); 830 } 831 832 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) 833 { 834 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) 835 quirk_usb_handoff_uhci(pdev); 836 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) 837 quirk_usb_handoff_ohci(pdev); 838 else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI) 839 quirk_usb_disable_ehci(pdev); 840 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) 841 quirk_usb_handoff_xhci(pdev); 842 } 843 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); 844