/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *  (and others)
 */

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"


#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_FMINTERVAL		0x34
#define OHCI_HCFS		(3 << 6)	/* hc functional state */
#define OHCI_HCR		(1 << 0)	/* host controller reset */
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define AB_REG_BAR_LOW		0xe0
#define AB_REG_BAR_HIGH		0xe1
#define AB_REG_BAR_SB700	0xf0
#define AB_INDX(addr)		((addr) + 0x00)
#define AB_DATA(addr)		((addr) + 0x04)
#define AX_INDXC		0x30
#define AX_DATAC		0x34

#define NB_PCIE_INDX_ADDR	0xe0
#define NB_PCIE_INDX_DATA	0xe4
#define PCIE_P_CNTL		0x10040
#define BIF_NB			0x10002
#define NB_PIF0_PWRDOWN_0	0x01100012
#define NB_PIF0_PWRDOWN_1	0x01100013

#define USB_INTEL_XUSB2PR	0xD0
#define USB_INTEL_USB2PRM	0xD4
#define USB_INTEL_USB3_PSSEN	0xD8
#define USB_INTEL_USB3PRM	0xDC

/*
 * amd_chipset_gen values represent the different AMD chipset generations
 */
enum amd_chipset_gen {
	NOT_AMD_CHIPSET = 0,
	AMD_CHIPSET_SB600,
	AMD_CHIPSET_SB700,
	AMD_CHIPSET_SB800,
	AMD_CHIPSET_HUDSON2,
	AMD_CHIPSET_BOLTON,
	AMD_CHIPSET_YANGTZE,
	AMD_CHIPSET_UNKNOWN,
};

struct amd_chipset_type {
	enum amd_chipset_gen gen;
	u8 rev;
};

static struct amd_chipset_info {
	struct pci_dev	*nb_dev;
	struct pci_dev	*smbus_dev;
	int nb_type;
	struct amd_chipset_type sb_type;
	int isoc_reqs;
	int probe_count;
	int probe_result;
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);

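/*
 * amd_chipset caches the result of the one-time chipset probe done in
 * usb_amd_find_chipset_info() below; amd_lock serializes that probe, the
 * probe_count and isoc_reqs reference counts, and the teardown done in
 * usb_amd_dev_put().
 */
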
/*
 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 *
 * AMD FCH/SB generation and revision are identified by SMBus controller
 * vendor, device and revision IDs.
 *
 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 */
static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
{
	u8 rev = 0;
	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;

	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
	if (pinfo->smbus_dev) {
		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x10 && rev <= 0x1f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
		else if (rev >= 0x30 && rev <= 0x3f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
		else if (rev >= 0x40 && rev <= 0x4f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
	} else {
		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);

		if (!pinfo->smbus_dev) {
			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
			return 0;
		}

		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x11 && rev <= 0x14)
			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
		else if (rev >= 0x15 && rev <= 0x18)
			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
		else if (rev >= 0x39 && rev <= 0x3a)
			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
	}

	pinfo->sb_type.rev = rev;
	return 1;
}

void sb800_prefetch(struct device *dev, int on)
{
	u16 misc;
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_read_config_word(pdev, 0x50, &misc);
	if (on == 0)
		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
	else
		pci_write_config_word(pdev, 0x50, misc | 0x0300);
}
EXPORT_SYMBOL_GPL(sb800_prefetch);

int usb_amd_find_chipset_info(void)
{
	unsigned long flags;
	struct amd_chipset_info info;
	int ret;

	spin_lock_irqsave(&amd_lock, flags);

	/* probe only once */
	if (amd_chipset.probe_count > 0) {
		amd_chipset.probe_count++;
		spin_unlock_irqrestore(&amd_lock, flags);
		return amd_chipset.probe_result;
	}
	memset(&info, 0, sizeof(info));
	spin_unlock_irqrestore(&amd_lock, flags);

	if (!amd_chipset_sb_type_init(&info)) {
		ret = 0;
		goto commit;
	}

	/* The chipset generations below don't need the AMD PLL quirk */
	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
			info.sb_type.gen == AMD_CHIPSET_SB600 ||
			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
			info.sb_type.rev > 0x3b)) {
		if (info.smbus_dev) {
			pci_dev_put(info.smbus_dev);
			info.smbus_dev = NULL;
		}
		ret = 0;
		goto commit;
	}

	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (info.nb_dev) {
		info.nb_type = 1;
	} else {
		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
		if (info.nb_dev) {
			info.nb_type = 2;
		} else {
			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						     0x9600, NULL);
			if (info.nb_dev)
				info.nb_type = 3;
		}
	}

	ret = info.probe_result = 1;
	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:

	spin_lock_irqsave(&amd_lock, flags);
	if (amd_chipset.probe_count > 0) {
		/* race - someone else was faster - drop devices */

		/* Mark that we were here */
		amd_chipset.probe_count++;
		ret = amd_chipset.probe_result;

		spin_unlock_irqrestore(&amd_lock, flags);

		pci_dev_put(info.nb_dev);
		pci_dev_put(info.smbus_dev);

	} else {
		/* no race - commit the result */
		info.probe_count++;
		amd_chipset = info;
		spin_unlock_irqrestore(&amd_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

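/*
 * Note: every call to usb_amd_find_chipset_info() bumps probe_count, and
 * only usb_amd_dev_put() (below) drops it; callers that want the cached
 * pci_dev references released eventually are expected to balance the two.
 */
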
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
	/* Make sure amd chipset type has already been initialized */
	usb_amd_find_chipset_info();
	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
		return 0;

	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
	return 1;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);

bool usb_amd_hang_symptom_quirk(void)
{
	u8 rev;

	usb_amd_find_chipset_info();
	rev = amd_chipset.sb_type.rev;
	/* SB600 and old versions of SB700 have a hang symptom bug */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			 rev >= 0x3a && rev <= 0x3b);
}
EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);

bool usb_amd_prefetch_quirk(void)
{
	usb_amd_find_chipset_info();
	/* SB800 needs the pre-fetch fix */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link going into that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of
 * some AMD platforms may stutter or have breaks occasionally.
 */
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ? 0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			amd_chipset.sb_type.rev <= 0x3b) {
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);

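/*
 * Host-controller drivers are expected to bracket isochronous activity with
 * the two helpers above, roughly as in this sketch (the exact call sites
 * live in the ohci/ehci/xhci drivers):
 *
 *	usb_amd_quirk_pll_disable();	(first iso stream is scheduled)
 *	... isochronous transfers run ...
 *	usb_amd_quirk_pll_enable();	(last iso stream is finished)
 *
 * The isoc_reqs counter above reference-counts these calls, so the A-link
 * PLL setting is only touched on the first disable and the final enable.
 */
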
void usb_amd_dev_put(void)
{
	struct pci_dev *nb, *smbus;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	/* save them to pci_dev_put outside of spinlock */
	nb = amd_chipset.nb_dev;
	smbus = amd_chipset.smbus_dev;

	amd_chipset.nb_dev = NULL;
	amd_chipset.smbus_dev = NULL;
	amd_chipset.nb_type = 0;
	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
	amd_chipset.isoc_reqs = 0;
	amd_chipset.probe_result = 0;

	spin_unlock_irqrestore(&amd_lock, flags);

	pci_dev_put(nb);
	pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)

static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

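/*
 * Take an OHCI controller away from SMM firmware if necessary (ownership
 * change request, then wait for the interrupt-routing bit to clear), reset
 * the USB bus, and soft-reset the controller with interrupts disabled,
 * preserving HcFmInterval where it is safe to read it.
 */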
static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;
	u32 fminterval = 0;
	bool no_fminterval = false;
	int cnt;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	/*
	 * ULi M5237 OHCI controller locks the whole system when accessing
	 * the OHCI_FMINTERVAL offset.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
		no_fminterval = true;

	control = readl(base + OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 5 seconds */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev,
				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 readl(base + OHCI_CONTROL));
	}
#endif

	/* disable interrupts */
	writel((u32) ~0, base + OHCI_INTRDISABLE);

	/* Reset the USB bus, if the controller isn't already in RESET */
	if (control & OHCI_HCFS) {
		/* Go into RESET, preserving RWC (and possibly IR) */
		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
		readl(base + OHCI_CONTROL);

		/* drive bus reset for at least 50 ms (7.1.7.5) */
		msleep(50);
	}

	/* software reset of the controller, preserving HcFmInterval */
	if (!no_fminterval)
		fminterval = readl(base + OHCI_FMINTERVAL);

	writel(OHCI_HCR, base + OHCI_CMDSTATUS);

	/* reset requires max 10 us delay */
	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
			break;
		udelay(1);
	}

	if (!no_fminterval)
		writel(fminterval, base + OHCI_FMINTERVAL);

	/* Now the controller is safely in SUSPEND and nothing can wake it up */
	iounmap(base);
}

static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
	{
		/* Pegatron Lucid (ExoPC) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
		},
	},
	{
		/* Pegatron Lucid (Ordissimo AIRIS) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/* Pegatron Lucid (Ordissimo) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/* HASEE E200 */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
			DMI_MATCH(DMI_BOARD_NAME, "E210"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
		},
	},
	{ }
};

static void ehci_bios_handoff(struct pci_dev *pdev,
					void __iomem *op_reg_base,
					u32 cap, u8 offset)
{
	int try_handoff = 1, tried_handoff = 0;

	/*
	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
	 * the handoff on its unused controller.  Skip it.
	 *
	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
	 */
	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
			pdev->device == 0x27cc)) {
		if (dmi_check_system(ehci_dmi_nohandoff_table))
			try_handoff = 0;
	}

	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
		/* BIOS workaround (?): be sure the pre-Linux code
		 * receives the SMI
		 */
		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
				       val | EHCI_USBLEGCTLSTS_SOOE);
#endif

		/* some systems get upset if this semaphore is
		 * set for any other reason than forcing a BIOS
		 * handoff..
		 */
		pci_write_config_byte(pdev, offset + 3, 1);
	}

	/* if boot firmware now owns EHCI, spin till it hands it over. */
	if (try_handoff) {
		int msec = 1000;
		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
			tried_handoff = 1;
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, offset, &cap);
		}
	}

	if (cap & EHCI_USBLEGSUP_BIOS) {
		/* well, possibly buggy BIOS... try to shut it down,
		 * and hope nothing goes too wrong
		 */
		if (try_handoff)
			dev_warn(&pdev->dev,
				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 cap);
		pci_write_config_byte(pdev, offset + 2, 0);
	}

	/* just in case, always disable EHCI SMIs */
	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

	/* If the BIOS ever owned the controller then we can't expect
	 * any power sessions to remain intact.
	 */
	if (tried_handoff)
		writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

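/*
 * Walk the EHCI extended-capability list in PCI config space, perform the
 * BIOS/OS handoff for the legacy-support capability (see above), and then
 * make sure the controller is halted with its interrupts disabled and its
 * status bits cleared before the EHCI driver takes over.
 */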
static void quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	void __iomem *base, *op_reg_base;
	u32	hcc_params, cap, val;
	u8	offset, cap_length;
	int	wait_time, count = 256/4;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities";
	 * spec section 5.1 explains the BIOS handoff, e.g. for
	 * booting from a USB disk or using a USB keyboard.
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		pci_read_config_dword(pdev, offset, &cap);

		switch (cap & 0xff) {
		case 1:
			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
			break;
		case 0: /* Illegal reserved cap, set cap=0 so we exit */
			cap = 0; /* then fallthrough... */
		default:
			dev_warn(&pdev->dev,
				 "EHCI: unrecognized capability %02x\n",
				 cap & 0xff);
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(100);
			wait_time -= 100;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	u32 result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	} while (wait_usec > 0);
	return -ETIMEDOUT;
}

/*
 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 * share some number of ports.  These ports can be switched between either
 * controller.  Not all of the ports under the EHCI host controller may be
 * switchable.
 *
 * The ports should be switched over to xHCI before PCI probes for any device
 * start.  This avoids active devices under EHCI being disconnected during the
 * port switchover, which could cause loss of data on USB storage devices, or
 * failed boot when the root file system is on a USB mass storage device and is
 * enumerated under EHCI first.
 *
 * We write into the xHC's PCI configuration space in some Intel-specific
 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 */
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
{
	u32		ports_available;
	bool		ehci_found = false;
	struct pci_dev	*companion = NULL;

	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
	 * switching ports from EHCI to xHCI.
	 */
	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
	    xhci_pdev->subsystem_device == 0x90a8)
		return;

	/* make sure an intel EHCI controller exists */
	for_each_pci_dev(companion) {
		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
		    companion->vendor == PCI_VENDOR_ID_INTEL) {
			ehci_found = true;
			break;
		}
	}

	if (!ehci_found)
		return;

	/* Don't switch the ports over if the user hasn't compiled the xHCI
	 * driver.  Otherwise they will see "dead" USB ports that don't power
	 * the devices.
	 */
	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
		dev_warn(&xhci_pdev->dev,
			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
		dev_warn(&xhci_pdev->dev,
			 "USB 3.0 devices will work at USB 2.0 speeds.\n");
		usb_disable_xhci_ports(xhci_pdev);
		return;
	}

	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
	 * indicates the ports that can be changed from the OS.
	 */
	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
			ports_available);

	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
	 * Register, to turn on SuperSpeed terminations for the
	 * switchable ports.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
		ports_available);

	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
	 * indicates the USB 2.0 ports to be controlled by the xHCI host.
	 */
	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
			ports_available);

	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
	 * switch the USB 2.0 power and data lines over to the xHCI
	 * host.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
		ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);

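/*
 * Undo the switchover: clear the SuperSpeed enable and USB 2.0 port-routing
 * registers so the switchable ports fall back to the EHCI controller.  Used
 * above when the xHCI driver is not built in, and exported so other callers
 * can hand the ports back as well.
 */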
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
{
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
}
EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);

/**
 * PCI Quirks for xHCI.
 *
 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we time out, assume the BIOS is broken and take control anyway.
 */
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
	void __iomem *base;
	int ext_cap_offset;
	void __iomem *op_reg_base;
	u32 val;
	int timeout;
	int len = pci_resource_len(pdev, 0);

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
	if (base == NULL)
		return;

	/*
	 * Find the Legacy Support Capability register -
	 * this is optional for xHCI host controllers.
	 */
	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
	do {
		if ((ext_cap_offset + sizeof(val)) > len) {
			/* We're reading garbage from the controller */
			dev_warn(&pdev->dev,
				 "xHCI controller failing to respond\n");
			iounmap(base);
			return;
		}

		if (!ext_cap_offset)
			/* We've reached the end of the extended capabilities */
			goto hc_init;

		val = readl(base + ext_cap_offset);
		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
			break;
		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
	} while (1);

	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

		/* Wait for 5 seconds with 10 microsecond polling interval */
		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
				0, 5000, 10);

		/* Assume a buggy BIOS and take HC ownership anyway */
		if (timeout) {
			dev_warn(&pdev->dev,
				 "xHCI BIOS handoff failed (BIOS bug?) %08x\n",
				 val);
			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
		}
	}

	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
	/* Mask off (turn off) any enabled SMIs */
	val &= XHCI_LEGACY_DISABLE_SMI;
	/* Mask all SMI events bits, RW1C */
	val |= XHCI_LEGACY_SMI_EVENTS;
	/* Disable any BIOS SMIs and clear all SMI events */
	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);

hc_init:
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		usb_enable_intel_xhci_ports(pdev);

	op_reg_base = base + XHCI_HC_LENGTH(readl(base));

	/* Wait for the host controller to be ready before writing any
	 * operational or runtime registers.  Wait 5 seconds and no more.
	 */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
			5000, 10);
	/* Assume a buggy HC and start HC initialization anyway */
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
			 val);
	}

	/* Send the halt and disable interrupts command */
	val = readl(op_reg_base + XHCI_CMD_OFFSET);
	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
	writel(val, op_reg_base + XHCI_CMD_OFFSET);

	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
			XHCI_MAX_HALT_USEC, 125);
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
			 "xHCI HW did not halt within %d usec status = 0x%x\n",
			 XHCI_MAX_HALT_USEC, val);
	}

	iounmap(base);
}

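/*
 * Dispatch the class-specific handoff/reset above for each USB host
 * controller found at PCI fixup time, before the regular HCD drivers
 * bind to the device.
 */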
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
	/* Skip Netlogic mips SoC's internal PCI USB controller.
	 * This device does not need/support EHCI/OHCI handoff
	 */
	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
		return;
	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
		return;

	if (pci_enable_device(pdev) < 0) {
		dev_warn(&pdev->dev,
			 "Can't enable PCI device, BIOS handoff failed.\n");
		return;
	}
	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
		quirk_usb_handoff_uhci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
		quirk_usb_handoff_ohci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
		quirk_usb_disable_ehci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
		quirk_usb_handoff_xhci(pdev);
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);