/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *  (and others)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"


#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_FMINTERVAL		0x34
#define OHCI_HCFS		(3 << 6)	/* hc functional state */
#define OHCI_HCR		(1 << 0)	/* host controller reset */
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define AB_REG_BAR_LOW		0xe0
#define AB_REG_BAR_HIGH		0xe1
#define AB_REG_BAR_SB700	0xf0
#define AB_INDX(addr)		((addr) + 0x00)
#define AB_DATA(addr)		((addr) + 0x04)
#define AX_INDXC		0x30
#define AX_DATAC		0x34

#define NB_PCIE_INDX_ADDR	0xe0
#define NB_PCIE_INDX_DATA	0xe4
#define PCIE_P_CNTL		0x10040
#define BIF_NB			0x10002
#define NB_PIF0_PWRDOWN_0	0x01100012
#define NB_PIF0_PWRDOWN_1	0x01100013

#define USB_INTEL_XUSB2PR	0xD0
#define USB_INTEL_USB2PRM	0xD4
#define USB_INTEL_USB3_PSSEN	0xD8
#define USB_INTEL_USB3PRM	0xDC

/*
 * amd_chipset_gen values represent different AMD chipset generations.
 */
enum amd_chipset_gen {
	NOT_AMD_CHIPSET = 0,
	AMD_CHIPSET_SB600,
	AMD_CHIPSET_SB700,
	AMD_CHIPSET_SB800,
	AMD_CHIPSET_HUDSON2,
	AMD_CHIPSET_BOLTON,
	AMD_CHIPSET_YANGTZE,
	AMD_CHIPSET_UNKNOWN,
};

struct amd_chipset_type {
	enum amd_chipset_gen gen;
	u8 rev;
};

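/*
 * Cached result of the AMD chipset probe, shared by all host controllers
 * in the system.  All fields are protected by amd_lock below; probe_count
 * reference-counts the users, so the PCI devices held here can be released
 * once the last host controller driver calls usb_amd_dev_put().
 */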
static struct amd_chipset_info {
	struct pci_dev	*nb_dev;
	struct pci_dev	*smbus_dev;
	int nb_type;
	struct amd_chipset_type sb_type;
	int isoc_reqs;
	int probe_count;
	int probe_result;
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);

/*
 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 *
 * AMD FCH/SB generation and revision is identified by SMBus controller
 * vendor, device and revision IDs.
 *
 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 */
static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
{
	u8 rev = 0;
	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;

	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
	if (pinfo->smbus_dev) {
		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x10 && rev <= 0x1f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
		else if (rev >= 0x30 && rev <= 0x3f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
		else if (rev >= 0x40 && rev <= 0x4f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
	} else {
		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);

		if (!pinfo->smbus_dev) {
			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
			return 0;
		}

		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x11 && rev <= 0x14)
			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
		else if (rev >= 0x15 && rev <= 0x18)
			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
		else if (rev >= 0x39 && rev <= 0x3a)
			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
	}

	pinfo->sb_type.rev = rev;
	return 1;
}

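/*
 * sb800_prefetch - turn the controller's prefetch feature on or off by
 * setting or clearing bits 8-9 of the host controller's PCI config
 * register 0x50.  Host controller drivers call this (when
 * usb_amd_prefetch_quirk() reports an SB800 southbridge) with @on == 0
 * to disable prefetch and @on != 0 to re-enable it.
 */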
void sb800_prefetch(struct device *dev, int on)
{
	u16 misc;
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_read_config_word(pdev, 0x50, &misc);
	if (on == 0)
		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
	else
		pci_write_config_word(pdev, 0x50, misc | 0x0300);
}
EXPORT_SYMBOL_GPL(sb800_prefetch);

int usb_amd_find_chipset_info(void)
{
	unsigned long flags;
	struct amd_chipset_info info;
	int ret;

	spin_lock_irqsave(&amd_lock, flags);

	/* probe only once */
	if (amd_chipset.probe_count > 0) {
		amd_chipset.probe_count++;
		spin_unlock_irqrestore(&amd_lock, flags);
		return amd_chipset.probe_result;
	}
	memset(&info, 0, sizeof(info));
	spin_unlock_irqrestore(&amd_lock, flags);

	if (!amd_chipset_sb_type_init(&info)) {
		ret = 0;
		goto commit;
	}

	/* The following chipset generations do not need the AMD PLL quirk */
	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
			info.sb_type.gen == AMD_CHIPSET_SB600 ||
			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
			info.sb_type.rev > 0x3b)) {
		if (info.smbus_dev) {
			pci_dev_put(info.smbus_dev);
			info.smbus_dev = NULL;
		}
		ret = 0;
		goto commit;
	}

	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (info.nb_dev) {
		info.nb_type = 1;
	} else {
		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
		if (info.nb_dev) {
			info.nb_type = 2;
		} else {
			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						     0x9600, NULL);
			if (info.nb_dev)
				info.nb_type = 3;
		}
	}

	ret = info.probe_result = 1;
	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:

	spin_lock_irqsave(&amd_lock, flags);
	if (amd_chipset.probe_count > 0) {
		/* race - someone else was faster - drop devices */

		/* Mark that we were here */
		amd_chipset.probe_count++;
		ret = amd_chipset.probe_result;

		spin_unlock_irqrestore(&amd_lock, flags);

		pci_dev_put(info.nb_dev);
		pci_dev_put(info.smbus_dev);

	} else {
		/* no race - commit the result */
		info.probe_count++;
		amd_chipset = info;
		spin_unlock_irqrestore(&amd_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
	/* Make sure the AMD chipset type has already been initialized */
	usb_amd_find_chipset_info();
	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
		return 0;

	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
	return 1;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);

bool usb_amd_hang_symptom_quirk(void)
{
	u8 rev;

	usb_amd_find_chipset_info();
	rev = amd_chipset.sb_type.rev;
	/* SB600 and old versions of SB700 have a hang symptom bug */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			 rev >= 0x3a && rev <= 0x3b);
}
EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);

bool usb_amd_prefetch_quirk(void)
{
	usb_amd_find_chipset_info();
	/* SB800 needs pre-fetch fix */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link going into that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on the OHCI/EHCI/xHCI controllers
 * of some AMD platforms may stutter or break up occasionally.
 */

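/*
 * The A-link registers touched below are reached through an index/data
 * pair: the register index is written to AB_INDX(addr) and the value is
 * then read or written at AB_DATA(addr).  On SB800/Hudson-2/Bolton the
 * base address is read via I/O ports 0xcd6/0xcd7; on older SB700 parts
 * it comes from the SMBus device's PCI config space (AB_REG_BAR_SB700).
 */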
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ? 0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			amd_chipset.sb_type.rev <= 0x3b) {
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);

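/*
 * Host controller drivers are expected to bracket isochronous streaming
 * with the pair above, roughly (a sketch, not taken from any one driver):
 *
 *	usb_amd_quirk_pll_disable();	// before the first isoc transfer starts
 *	... run the periodic transfers ...
 *	usb_amd_quirk_pll_enable();	// after the last isoc transfer completes
 *
 * The calls nest safely: usb_amd_quirk_pll() reference-counts them through
 * amd_chipset.isoc_reqs under amd_lock, so only the first disable and the
 * last enable actually touch the hardware.
 */
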
void usb_amd_dev_put(void)
{
	struct pci_dev *nb, *smbus;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	/* save them to pci_dev_put outside of spinlock */
	nb = amd_chipset.nb_dev;
	smbus = amd_chipset.smbus_dev;

	amd_chipset.nb_dev = NULL;
	amd_chipset.smbus_dev = NULL;
	amd_chipset.nb_type = 0;
	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
	amd_chipset.isoc_reqs = 0;
	amd_chipset.probe_result = 0;

	spin_unlock_irqrestore(&amd_lock, flags);

	pci_dev_put(nb);
	pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

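/*
 * io_type_enabled - report whether the device currently decodes the given
 * kind of access (PCI_COMMAND_IO or PCI_COMMAND_MEMORY), based on its PCI
 * command register.  The pio_enabled() and mmio_enabled() wrappers below
 * are what the quirk routines actually use.
 */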
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev)	io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev)	io_type_enabled(dev, PCI_COMMAND_MEMORY)

static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

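/*
 * quirk_usb_handoff_ohci - take an OHCI controller away from the BIOS/SMM
 * driver.  If interrupt routing is enabled, request an ownership change and
 * wait for the firmware to release the controller; then disable interrupts,
 * drive a bus reset if needed, and soft-reset the host controller while
 * preserving HcFmInterval (except on controllers where reading it is unsafe).
 */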
static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;
	u32 fminterval = 0;
	bool no_fminterval = false;
	int cnt;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	/*
	 * ULi M5237 OHCI controller locks the whole system when accessing
	 * the OHCI_FMINTERVAL offset.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
		no_fminterval = true;

	control = readl(base + OHCI_CONTROL);

	/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 500 ms total */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev,
				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 readl(base + OHCI_CONTROL));
	}
#endif

	/* disable interrupts */
	writel((u32) ~0, base + OHCI_INTRDISABLE);

	/* Reset the USB bus, if the controller isn't already in RESET */
	if (control & OHCI_HCFS) {
		/* Go into RESET, preserving RWC (and possibly IR) */
		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
		readl(base + OHCI_CONTROL);

		/* drive bus reset for at least 50 ms (7.1.7.5) */
		msleep(50);
	}

	/* software reset of the controller, preserving HcFmInterval */
	if (!no_fminterval)
		fminterval = readl(base + OHCI_FMINTERVAL);

	writel(OHCI_HCR, base + OHCI_CMDSTATUS);

	/* reset requires max 10 us delay */
	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
			break;
		udelay(1);
	}

	if (!no_fminterval)
		writel(fminterval, base + OHCI_FMINTERVAL);

	/* Now the controller is safely in SUSPEND and nothing can wake it up */
	iounmap(base);
}

static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
	{
		/* Pegatron Lucid (ExoPC) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
		},
	},
	{
		/* Pegatron Lucid (Ordissimo AIRIS) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/* Pegatron Lucid (Ordissimo) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/* HASEE E200 */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
			DMI_MATCH(DMI_BOARD_NAME, "E210"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
		},
	},
	{ }
};

static void ehci_bios_handoff(struct pci_dev *pdev,
					void __iomem *op_reg_base,
					u32 cap, u8 offset)
{
	int try_handoff = 1, tried_handoff = 0;

	/*
	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
	 * the handoff on its unused controller.  Skip it.
	 *
	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
	 */
	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
			pdev->device == 0x27cc)) {
		if (dmi_check_system(ehci_dmi_nohandoff_table))
			try_handoff = 0;
	}

	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  So we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
		/* BIOS workaround (?): be sure the pre-Linux code
		 * receives the SMI
		 */
		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
				       val | EHCI_USBLEGCTLSTS_SOOE);
#endif

		/* some systems get upset if this semaphore is
		 * set for any other reason than forcing a BIOS
		 * handoff..
		 */
		pci_write_config_byte(pdev, offset + 3, 1);
	}

	/* if boot firmware now owns EHCI, spin till it hands it over. */
	if (try_handoff) {
		int msec = 1000;
		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
			tried_handoff = 1;
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, offset, &cap);
		}
	}

	if (cap & EHCI_USBLEGSUP_BIOS) {
		/* well, possibly buggy BIOS... try to shut it down,
		 * and hope nothing goes too wrong
		 */
		if (try_handoff)
			dev_warn(&pdev->dev,
				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 cap);
		pci_write_config_byte(pdev, offset + 2, 0);
	}

	/* just in case, always disable EHCI SMIs */
	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

	/* If the BIOS ever owned the controller then we can't expect
	 * any power sessions to remain intact.
	 */
	if (tried_handoff)
		writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

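/*
 * quirk_usb_disable_ehci - walk the EHCI extended capability list, hand the
 * controller over from the BIOS (see ehci_bios_handoff() above), then make
 * sure the controller is halted and its interrupts are disabled before the
 * regular EHCI driver takes over.
 */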
static void quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	void __iomem *base, *op_reg_base;
	u32 hcc_params, cap, val;
	u8 offset, cap_length;
	int wait_time, count = 256/4;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities"
	 * spec section 5.1 explains the bios handoff, e.g. for
	 * booting from USB disk or using a usb keyboard
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		pci_read_config_dword(pdev, offset, &cap);

		switch (cap & 0xff) {
		case 1:
			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
			break;
		case 0: /* Illegal reserved cap, set cap=0 so we exit */
			cap = 0; /* then fallthrough... */
		default:
			dev_warn(&pdev->dev,
				 "EHCI: unrecognized capability %02x\n",
				 cap & 0xff);
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(100);
			wait_time -= 100;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	u32 result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	} while (wait_usec > 0);
	return -ETIMEDOUT;
}

/*
 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 * share some number of ports.  These ports can be switched between either
 * controller.  Not all of the ports under the EHCI host controller may be
 * switchable.
 *
 * The ports should be switched over to xHCI before PCI probes for any device
 * start.  This avoids active devices under EHCI being disconnected during the
 * port switchover, which could cause loss of data on USB storage devices, or
 * failed boot when the root file system is on a USB mass storage device and
 * is enumerated under EHCI first.
 *
 * We write into the xHC's PCI configuration space in some Intel-specific
 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 */
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
{
	u32 ports_available;
	bool ehci_found = false;
	struct pci_dev *companion = NULL;

	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
	 * switching ports from EHCI to xHCI
	 */
	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
	    xhci_pdev->subsystem_device == 0x90a8)
		return;

	/* make sure an intel EHCI controller exists */
	for_each_pci_dev(companion) {
		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
		    companion->vendor == PCI_VENDOR_ID_INTEL) {
			ehci_found = true;
			break;
		}
	}

	if (!ehci_found)
		return;

	/* Don't switchover the ports if the user hasn't compiled the xHCI
	 * driver.  Otherwise they will see "dead" USB ports that don't power
	 * the devices.
	 */
	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
		dev_warn(&xhci_pdev->dev,
			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
		dev_warn(&xhci_pdev->dev,
			 "USB 3.0 devices will work at USB 2.0 speeds.\n");
		usb_disable_xhci_ports(xhci_pdev);
		return;
	}

	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
	 * indicates the ports that can be changed from the OS.
	 */
	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
			ports_available);

	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
	 * Register, to turn on SuperSpeed terminations for the
	 * switchable ports.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
		ports_available);

	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
	 * indicates the USB 2.0 ports to be controlled by the xHCI host.
	 */
	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
			ports_available);

	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
	 * switch the USB 2.0 power and data lines over to the xHCI
	 * host.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
		ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);

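/*
 * usb_disable_xhci_ports - undo the switchover: clear the SuperSpeed enable
 * and USB 2.0 port routing registers so the shared ports fall back to the
 * EHCI companion controller.  Called above when CONFIG_USB_XHCI_HCD is
 * disabled, and exported for host controller drivers that need to do the
 * same.
 */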
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
{
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
}
EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);

/*
 * PCI Quirks for xHCI.
 *
 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we timeout, assume the BIOS is broken and take control anyway.
 */
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
	void __iomem *base;
	int ext_cap_offset;
	void __iomem *op_reg_base;
	u32 val;
	int timeout;
	int len = pci_resource_len(pdev, 0);

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
	if (base == NULL)
		return;

	/*
	 * Find the Legacy Support Capability register -
	 * this is optional for xHCI host controllers.
	 */
	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);

	if (!ext_cap_offset)
		goto hc_init;

	if ((ext_cap_offset + sizeof(val)) > len) {
		/* We're reading garbage from the controller */
		dev_warn(&pdev->dev, "xHCI controller failing to respond\n");
		goto iounmap;
	}
	val = readl(base + ext_cap_offset);

	/* Auto handoff never worked for these devices.  Force it and continue */
	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
			(pdev->vendor == PCI_VENDOR_ID_RENESAS
			 && pdev->device == 0x0014)) {
		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
		writel(val, base + ext_cap_offset);
	}

	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

		/* Wait for 5 seconds with 10 microsecond polling interval */
		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
				0, 5000, 10);

		/* Assume a buggy BIOS and take HC ownership anyway */
		if (timeout) {
			dev_warn(&pdev->dev,
				 "xHCI BIOS handoff failed (BIOS bug?) %08x\n",
				 val);
			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
		}
	}

	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
	/* Mask off (turn off) any enabled SMIs */
	val &= XHCI_LEGACY_DISABLE_SMI;
	/* Mask all SMI events bits, RW1C */
	val |= XHCI_LEGACY_SMI_EVENTS;
	/* Disable any BIOS SMIs and clear all SMI events */
	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);

hc_init:
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		usb_enable_intel_xhci_ports(pdev);

	op_reg_base = base + XHCI_HC_LENGTH(readl(base));

	/* Wait for the host controller to be ready before writing any
	 * operational or runtime registers.  Wait 5 seconds and no more.
	 */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
			5000, 10);
	/* Assume a buggy HC and start HC initialization anyway */
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
			 val);
	}

	/* Send the halt and disable interrupts command */
	val = readl(op_reg_base + XHCI_CMD_OFFSET);
	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
	writel(val, op_reg_base + XHCI_CMD_OFFSET);

	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
			XHCI_MAX_HALT_USEC, 125);
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
			 "xHCI HW did not halt within %d usec status = 0x%x\n",
			 XHCI_MAX_HALT_USEC, val);
	}

iounmap:
	iounmap(base);
}

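/*
 * quirk_usb_early_handoff - PCI final fixup entry point.  Dispatches to the
 * class-specific handoff/reset routine above for every UHCI, OHCI, EHCI or
 * xHCI controller found, so the BIOS has let go of the hardware before the
 * regular host controller drivers bind to it.
 */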
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
	/* Skip Netlogic mips SoC's internal PCI USB controller.
	 * This device does not need/support EHCI/OHCI handoff
	 */
	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
		return;
	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
		return;

	if (pci_enable_device(pdev) < 0) {
		dev_warn(&pdev->dev,
			 "Can't enable PCI device, BIOS handoff failed.\n");
		return;
	}
	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
		quirk_usb_handoff_uhci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
		quirk_usb_handoff_ohci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
		quirk_usb_disable_ehci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
		quirk_usb_handoff_xhci(pdev);
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);