// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, usec);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
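 *
 * XHCI_MAX_HALT_USEC (defined in xhci.h) is the timeout used below; it is
 * intended to cover the 16 ms halt budget mentioned above.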
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 mS,
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access,
	 * may result in a system hang very rarely.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
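	 * Allow up to 10 seconds (the 10 * 1000 * 1000 us timeout below) for
	 * CNR to clear.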
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int err, i;
	u64 val;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			STS_FATAL, STS_FATAL,
			XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	/*
	 * TODO:Check with MSI Soc for sysdev
	 */
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * calculate number of msi-x vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
	 *   Add additional 1 vector to ensure always available interrupt.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		return ret;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
				"xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	pci_free_irq_vectors(pdev);
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	/* return if using legacy interrupt */
	if (hcd->irq > 0)
		return;

	if (hcd->msix_enabled) {
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	} else {
		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
	}

	pci_free_irq_vectors(pdev);
	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->msix_enabled) {
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(pci_irq_vector(pdev, i));
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts. Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi*/
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt*/
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that causes ports behind that hardware to enter compliance mode sometimes.
 * The quirk creates a timer that polls every 2 seconds the link state of
 * each host controller's port and recovers it by issuing a Warm reset
 * if Compliance mode is detected, otherwise the port will become "dead" (no
 * device connections or disconnections will be detected anymore). Because no
 * status event is generated when entering compliance mode (per xhci spec),
 * this quirk is needed on systems that have the failing hardware installed.
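 *
 * The timer re-arms itself from compliance_mode_recovery() until every USB3
 * root hub port has been observed in U0 (tracked in xhci->port_status_u0 and
 * tested by xhci_all_ports_seen_u0()).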
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
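 *
 * Only the primary (USB2) HCD performs the full setup below; for the shared
 * USB3 HCD this function returns early via xhci_run_finished().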
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");

	xhci_dbc_init(xhci);

	xhci_debugfs_init(xhci);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_dbc_exit(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
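 *
 * Note that only the first TRBS_PER_SEGMENT - 1 TRBs of each segment are
 * zeroed below; the last entry is the link TRB, which only has its cycle bit
 * cleared.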
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	unsigned long flags;
	u32 t1, t2, portsc;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable usb3 ports Wake bits */
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		portsc = t1;
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2) {
			writel(t2, ports[port_index]->addr);
			xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
				 xhci->usb3_rhub.hcd->self.busnum,
				 port_index + 1, portsc, t2);
		}
	}

	/* disable usb2 ports Wake bits */
	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		portsc = t1;
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2) {
			writel(t2, ports[port_index]->addr);
			xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
				 xhci->usb2_rhub.hcd->self.busnum,
				 port_index + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	xhci_dbc_suspend(xhci);

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS and when driver tries to poll
		 * to see if the xHC clears BIT(8) which never happens
		 * and driver assumes that controller is not responding
		 * and times out. To workaround this, it's good to check
		 * if SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
		hibernated = true;

	if (!hibernated) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
					STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

 done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		if (xhci_pending_portevent(xhci)) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * needs to be re-initialized after every system resume, since ports
	 * can suffer the Compliance Mode issue again regardless of whether
	 * they previously entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
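 * Likewise, added_ctxs = SLOT_FLAG | EP0_FLAG = 0b11 gives fls(0b11) = 2, so
 * the last valid endpoint context index is 1 (the default control endpoint).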
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		return -ESHUTDOWN;
	}
	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		return -ENODEV;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * check ring is not re-allocated since URB was enqueued. If it is, then
	 * make sure none of the ring related pointers in this URB private data
	 * are touched, such as td_list, otherwise we overwrite freed data
	 */
	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].first_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_STOP_CMD_PENDING;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				  __func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); 1789 1790 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 1791 1792 if (xhci->quirks & XHCI_MTK_HOST) 1793 xhci_mtk_drop_ep_quirk(hcd, udev, ep); 1794 1795 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", 1796 (unsigned int) ep->desc.bEndpointAddress, 1797 udev->slot_id, 1798 (unsigned int) new_drop_flags, 1799 (unsigned int) new_add_flags); 1800 return 0; 1801 } 1802 1803 /* Add an endpoint to a new possible bandwidth configuration for this device. 1804 * Only one call to this function is allowed per endpoint before 1805 * check_bandwidth() or reset_bandwidth() must be called. 1806 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will 1807 * add the endpoint to the schedule with possibly new parameters denoted by a 1808 * different endpoint descriptor in usb_host_endpoint. 1809 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is 1810 * not allowed. 1811 * 1812 * The USB core will not allow URBs to be queued to an endpoint until the 1813 * configuration or alt setting is installed in the device, so there's no need 1814 * for mutual exclusion to protect the xhci->devs[slot_id] structure. 1815 */ 1816 static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, 1817 struct usb_host_endpoint *ep) 1818 { 1819 struct xhci_hcd *xhci; 1820 struct xhci_container_ctx *in_ctx; 1821 unsigned int ep_index; 1822 struct xhci_input_control_ctx *ctrl_ctx; 1823 struct xhci_ep_ctx *ep_ctx; 1824 u32 added_ctxs; 1825 u32 new_add_flags, new_drop_flags; 1826 struct xhci_virt_device *virt_dev; 1827 int ret = 0; 1828 1829 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1830 if (ret <= 0) { 1831 /* So we won't queue a reset ep command for a root hub */ 1832 ep->hcpriv = NULL; 1833 return ret; 1834 } 1835 xhci = hcd_to_xhci(hcd); 1836 if (xhci->xhc_state & XHCI_STATE_DYING) 1837 return -ENODEV; 1838 1839 added_ctxs = xhci_get_endpoint_flag(&ep->desc); 1840 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { 1841 /* FIXME when we have to issue an evaluate endpoint command to 1842 * deal with ep0 max packet size changing once we get the 1843 * descriptors 1844 */ 1845 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", 1846 __func__, added_ctxs); 1847 return 0; 1848 } 1849 1850 virt_dev = xhci->devs[udev->slot_id]; 1851 in_ctx = virt_dev->in_ctx; 1852 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 1853 if (!ctrl_ctx) { 1854 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1855 __func__); 1856 return 0; 1857 } 1858 1859 ep_index = xhci_get_endpoint_index(&ep->desc); 1860 /* If this endpoint is already in use, and the upper layers are trying 1861 * to add it again without dropping it, reject the addition. 1862 */ 1863 if (virt_dev->eps[ep_index].ring && 1864 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { 1865 xhci_warn(xhci, "Trying to add endpoint 0x%x " 1866 "without dropping it.\n", 1867 (unsigned int) ep->desc.bEndpointAddress); 1868 return -EINVAL; 1869 } 1870 1871 /* If the HCD has already noted the endpoint is enabled, 1872 * ignore this request. 
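 * (for instance when xhci_add_endpoint() is called twice for the same
 * endpoint before xhci_check_bandwidth() has run).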
1873 */ 1874 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { 1875 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", 1876 __func__, ep); 1877 return 0; 1878 } 1879 1880 /* 1881 * Configuration and alternate setting changes must be done in 1882 * process context, not interrupt context (or so documenation 1883 * for usb_set_interface() and usb_set_configuration() claim). 1884 */ 1885 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { 1886 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", 1887 __func__, ep->desc.bEndpointAddress); 1888 return -ENOMEM; 1889 } 1890 1891 if (xhci->quirks & XHCI_MTK_HOST) { 1892 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); 1893 if (ret < 0) { 1894 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); 1895 virt_dev->eps[ep_index].new_ring = NULL; 1896 return ret; 1897 } 1898 } 1899 1900 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); 1901 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); 1902 1903 /* If xhci_endpoint_disable() was called for this endpoint, but the 1904 * xHC hasn't been notified yet through the check_bandwidth() call, 1905 * this re-adds a new state for the endpoint from the new endpoint 1906 * descriptors. We must drop and re-add this endpoint, so we leave the 1907 * drop flags alone. 1908 */ 1909 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); 1910 1911 /* Store the usb_device pointer for later use */ 1912 ep->hcpriv = udev; 1913 1914 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1915 trace_xhci_add_endpoint(ep_ctx); 1916 1917 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); 1918 1919 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", 1920 (unsigned int) ep->desc.bEndpointAddress, 1921 udev->slot_id, 1922 (unsigned int) new_drop_flags, 1923 (unsigned int) new_add_flags); 1924 return 0; 1925 } 1926 1927 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) 1928 { 1929 struct xhci_input_control_ctx *ctrl_ctx; 1930 struct xhci_ep_ctx *ep_ctx; 1931 struct xhci_slot_ctx *slot_ctx; 1932 int i; 1933 1934 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 1935 if (!ctrl_ctx) { 1936 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1937 __func__); 1938 return; 1939 } 1940 1941 /* When a device's add flag and drop flag are zero, any subsequent 1942 * configure endpoint command will leave that endpoint's state 1943 * untouched. Make sure we don't leave any old state in the input 1944 * endpoint contexts. 
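 * Below, both flag words are cleared, Context Entries in the slot context
 * is reset so that only EP0 is reported as valid, and every other input
 * endpoint context is zeroed.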
1945 */ 1946 ctrl_ctx->drop_flags = 0; 1947 ctrl_ctx->add_flags = 0; 1948 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 1949 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 1950 /* Endpoint 0 is always valid */ 1951 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); 1952 for (i = 1; i < 31; i++) { 1953 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); 1954 ep_ctx->ep_info = 0; 1955 ep_ctx->ep_info2 = 0; 1956 ep_ctx->deq = 0; 1957 ep_ctx->tx_info = 0; 1958 } 1959 } 1960 1961 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, 1962 struct usb_device *udev, u32 *cmd_status) 1963 { 1964 int ret; 1965 1966 switch (*cmd_status) { 1967 case COMP_COMMAND_ABORTED: 1968 case COMP_COMMAND_RING_STOPPED: 1969 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1970 ret = -ETIME; 1971 break; 1972 case COMP_RESOURCE_ERROR: 1973 dev_warn(&udev->dev, 1974 "Not enough host controller resources for new device state.\n"); 1975 ret = -ENOMEM; 1976 /* FIXME: can we allocate more resources for the HC? */ 1977 break; 1978 case COMP_BANDWIDTH_ERROR: 1979 case COMP_SECONDARY_BANDWIDTH_ERROR: 1980 dev_warn(&udev->dev, 1981 "Not enough bandwidth for new device state.\n"); 1982 ret = -ENOSPC; 1983 /* FIXME: can we go back to the old state? */ 1984 break; 1985 case COMP_TRB_ERROR: 1986 /* the HCD set up something wrong */ 1987 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " 1988 "add flag = 1, " 1989 "and endpoint is not disabled.\n"); 1990 ret = -EINVAL; 1991 break; 1992 case COMP_INCOMPATIBLE_DEVICE_ERROR: 1993 dev_warn(&udev->dev, 1994 "ERROR: Incompatible device for endpoint configure command.\n"); 1995 ret = -ENODEV; 1996 break; 1997 case COMP_SUCCESS: 1998 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1999 "Successful Endpoint Configure command"); 2000 ret = 0; 2001 break; 2002 default: 2003 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 2004 *cmd_status); 2005 ret = -EINVAL; 2006 break; 2007 } 2008 return ret; 2009 } 2010 2011 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 2012 struct usb_device *udev, u32 *cmd_status) 2013 { 2014 int ret; 2015 2016 switch (*cmd_status) { 2017 case COMP_COMMAND_ABORTED: 2018 case COMP_COMMAND_RING_STOPPED: 2019 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 2020 ret = -ETIME; 2021 break; 2022 case COMP_PARAMETER_ERROR: 2023 dev_warn(&udev->dev, 2024 "WARN: xHCI driver setup invalid evaluate context command.\n"); 2025 ret = -EINVAL; 2026 break; 2027 case COMP_SLOT_NOT_ENABLED_ERROR: 2028 dev_warn(&udev->dev, 2029 "WARN: slot not enabled for evaluate context command.\n"); 2030 ret = -EINVAL; 2031 break; 2032 case COMP_CONTEXT_STATE_ERROR: 2033 dev_warn(&udev->dev, 2034 "WARN: invalid context state for evaluate context command.\n"); 2035 ret = -EINVAL; 2036 break; 2037 case COMP_INCOMPATIBLE_DEVICE_ERROR: 2038 dev_warn(&udev->dev, 2039 "ERROR: Incompatible device for evaluate context command.\n"); 2040 ret = -ENODEV; 2041 break; 2042 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: 2043 /* Max Exit Latency too large error */ 2044 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 2045 ret = -EINVAL; 2046 break; 2047 case COMP_SUCCESS: 2048 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2049 "Successful evaluate context command"); 2050 ret = 0; 2051 break; 2052 default: 2053 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 2054 *cmd_status); 2055 ret = -EINVAL; 2056 break; 2057 } 2058 return ret; 2059 } 2060 2061 static u32 
xhci_count_num_new_endpoints(struct xhci_hcd *xhci, 2062 struct xhci_input_control_ctx *ctrl_ctx) 2063 { 2064 u32 valid_add_flags; 2065 u32 valid_drop_flags; 2066 2067 /* Ignore the slot flag (bit 0), and the default control endpoint flag 2068 * (bit 1). The default control endpoint is added during the Address 2069 * Device command and is never removed until the slot is disabled. 2070 */ 2071 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 2072 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 2073 2074 /* Use hweight32 to count the number of ones in the add flags, or 2075 * number of endpoints added. Don't count endpoints that are changed 2076 * (both added and dropped). 2077 */ 2078 return hweight32(valid_add_flags) - 2079 hweight32(valid_add_flags & valid_drop_flags); 2080 } 2081 2082 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, 2083 struct xhci_input_control_ctx *ctrl_ctx) 2084 { 2085 u32 valid_add_flags; 2086 u32 valid_drop_flags; 2087 2088 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 2089 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 2090 2091 return hweight32(valid_drop_flags) - 2092 hweight32(valid_add_flags & valid_drop_flags); 2093 } 2094 2095 /* 2096 * We need to reserve the new number of endpoints before the configure endpoint 2097 * command completes. We can't subtract the dropped endpoints from the number 2098 * of active endpoints until the command completes because we can oversubscribe 2099 * the host in this case: 2100 * 2101 * - the first configure endpoint command drops more endpoints than it adds 2102 * - a second configure endpoint command that adds more endpoints is queued 2103 * - the first configure endpoint command fails, so the config is unchanged 2104 * - the second command may succeed, even though there isn't enough resources 2105 * 2106 * Must be called with xhci->lock held. 2107 */ 2108 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, 2109 struct xhci_input_control_ctx *ctrl_ctx) 2110 { 2111 u32 added_eps; 2112 2113 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 2114 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 2115 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2116 "Not enough ep ctxs: " 2117 "%u active, need to add %u, limit is %u.", 2118 xhci->num_active_eps, added_eps, 2119 xhci->limit_active_eps); 2120 return -ENOMEM; 2121 } 2122 xhci->num_active_eps += added_eps; 2123 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2124 "Adding %u ep ctxs, %u now active.", added_eps, 2125 xhci->num_active_eps); 2126 return 0; 2127 } 2128 2129 /* 2130 * The configure endpoint was failed by the xHC for some other reason, so we 2131 * need to revert the resources that failed configuration would have used. 2132 * 2133 * Must be called with xhci->lock held. 2134 */ 2135 static void xhci_free_host_resources(struct xhci_hcd *xhci, 2136 struct xhci_input_control_ctx *ctrl_ctx) 2137 { 2138 u32 num_failed_eps; 2139 2140 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 2141 xhci->num_active_eps -= num_failed_eps; 2142 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2143 "Removing %u failed ep ctxs, %u now active.", 2144 num_failed_eps, 2145 xhci->num_active_eps); 2146 } 2147 2148 /* 2149 * Now that the command has completed, clean up the active endpoint count by 2150 * subtracting out the endpoints that were dropped (but not changed). 2151 * 2152 * Must be called with xhci->lock held. 
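 *
 * Worked example with made-up flags: if add_flags covers device context
 * indices 3 and 4 while drop_flags covers index 3, endpoint 3 was merely
 * changed, so one new context was reserved up front and nothing is released
 * here; a configuration that only dropped index 5 would instead release one
 * context at this point.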
2153 */ 2154 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, 2155 struct xhci_input_control_ctx *ctrl_ctx) 2156 { 2157 u32 num_dropped_eps; 2158 2159 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); 2160 xhci->num_active_eps -= num_dropped_eps; 2161 if (num_dropped_eps) 2162 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2163 "Removing %u dropped ep ctxs, %u now active.", 2164 num_dropped_eps, 2165 xhci->num_active_eps); 2166 } 2167 2168 static unsigned int xhci_get_block_size(struct usb_device *udev) 2169 { 2170 switch (udev->speed) { 2171 case USB_SPEED_LOW: 2172 case USB_SPEED_FULL: 2173 return FS_BLOCK; 2174 case USB_SPEED_HIGH: 2175 return HS_BLOCK; 2176 case USB_SPEED_SUPER: 2177 case USB_SPEED_SUPER_PLUS: 2178 return SS_BLOCK; 2179 case USB_SPEED_UNKNOWN: 2180 case USB_SPEED_WIRELESS: 2181 default: 2182 /* Should never happen */ 2183 return 1; 2184 } 2185 } 2186 2187 static unsigned int 2188 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) 2189 { 2190 if (interval_bw->overhead[LS_OVERHEAD_TYPE]) 2191 return LS_OVERHEAD; 2192 if (interval_bw->overhead[FS_OVERHEAD_TYPE]) 2193 return FS_OVERHEAD; 2194 return HS_OVERHEAD; 2195 } 2196 2197 /* If we are changing a LS/FS device under a HS hub, 2198 * make sure (if we are activating a new TT) that the HS bus has enough 2199 * bandwidth for this new TT. 2200 */ 2201 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, 2202 struct xhci_virt_device *virt_dev, 2203 int old_active_eps) 2204 { 2205 struct xhci_interval_bw_table *bw_table; 2206 struct xhci_tt_bw_info *tt_info; 2207 2208 /* Find the bandwidth table for the root port this TT is attached to. */ 2209 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; 2210 tt_info = virt_dev->tt_info; 2211 /* If this TT already had active endpoints, the bandwidth for this TT 2212 * has already been added. Removing all periodic endpoints (and thus 2213 * making the TT enactive) will only decrease the bandwidth used. 2214 */ 2215 if (old_active_eps) 2216 return 0; 2217 if (old_active_eps == 0 && tt_info->active_eps != 0) { 2218 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) 2219 return -ENOMEM; 2220 return 0; 2221 } 2222 /* Not sure why we would have no new active endpoints... 2223 * 2224 * Maybe because of an Evaluate Context change for a hub update or a 2225 * control endpoint 0 max packet size change? 2226 * FIXME: skip the bandwidth calculation in that case. 2227 */ 2228 return 0; 2229 } 2230 2231 static int xhci_check_ss_bw(struct xhci_hcd *xhci, 2232 struct xhci_virt_device *virt_dev) 2233 { 2234 unsigned int bw_reserved; 2235 2236 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); 2237 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) 2238 return -ENOMEM; 2239 2240 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); 2241 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) 2242 return -ENOMEM; 2243 2244 return 0; 2245 } 2246 2247 /* 2248 * This algorithm is a very conservative estimate of the worst-case scheduling 2249 * scenario for any one interval. The hardware dynamically schedules the 2250 * packets, so we can't tell which microframe could be the limiting factor in 2251 * the bandwidth scheduling. This only takes into account periodic endpoints. 2252 * 2253 * Obviously, we can't solve an NP complete problem to find the minimum worst 2254 * case scenario. 
Instead, we come up with an estimate that is no less than 2255 * the worst case bandwidth used for any one microframe, but may be an 2256 * over-estimate. 2257 * 2258 * We walk the requirements for each endpoint by interval, starting with the 2259 * smallest interval, and place packets in the schedule where there is only one 2260 * possible way to schedule packets for that interval. In order to simplify 2261 * this algorithm, we record the largest max packet size for each interval, and 2262 * assume all packets will be that size. 2263 * 2264 * For interval 0, we obviously must schedule all packets for each interval. 2265 * The bandwidth for interval 0 is just the amount of data to be transmitted 2266 * (the sum of all max ESIT payload sizes, plus any overhead per packet times 2267 * the number of packets). 2268 * 2269 * For interval 1, we have two possible microframes to schedule those packets 2270 * in. For this algorithm, if we can schedule the same number of packets for 2271 * each possible scheduling opportunity (each microframe), we will do so. The 2272 * remaining number of packets will be saved to be transmitted in the gaps in 2273 * the next interval's scheduling sequence. 2274 * 2275 * As we move those remaining packets to be scheduled with interval 2 packets, 2276 * we have to double the number of remaining packets to transmit. This is 2277 * because the intervals are actually powers of 2, and we would be transmitting 2278 * the previous interval's packets twice in this interval. We also have to be 2279 * sure that when we look at the largest max packet size for this interval, we 2280 * also look at the largest max packet size for the remaining packets and take 2281 * the greater of the two. 2282 * 2283 * The algorithm continues to evenly distribute packets in each scheduling 2284 * opportunity, and push the remaining packets out, until we get to the last 2285 * interval. Then those packets and their associated overhead are just added 2286 * to the bandwidth used. 2287 */ 2288 static int xhci_check_bw_table(struct xhci_hcd *xhci, 2289 struct xhci_virt_device *virt_dev, 2290 int old_active_eps) 2291 { 2292 unsigned int bw_reserved; 2293 unsigned int max_bandwidth; 2294 unsigned int bw_used; 2295 unsigned int block_size; 2296 struct xhci_interval_bw_table *bw_table; 2297 unsigned int packet_size = 0; 2298 unsigned int overhead = 0; 2299 unsigned int packets_transmitted = 0; 2300 unsigned int packets_remaining = 0; 2301 unsigned int i; 2302 2303 if (virt_dev->udev->speed >= USB_SPEED_SUPER) 2304 return xhci_check_ss_bw(xhci, virt_dev); 2305 2306 if (virt_dev->udev->speed == USB_SPEED_HIGH) { 2307 max_bandwidth = HS_BW_LIMIT; 2308 /* Convert percent of bus BW reserved to blocks reserved */ 2309 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); 2310 } else { 2311 max_bandwidth = FS_BW_LIMIT; 2312 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); 2313 } 2314 2315 bw_table = virt_dev->bw_table; 2316 /* We need to translate the max packet size and max ESIT payloads into 2317 * the units the hardware uses. 2318 */ 2319 block_size = xhci_get_block_size(virt_dev->udev); 2320 2321 /* If we are manipulating a LS/FS device under a HS hub, double check 2322 * that the HS bus has enough bandwidth if we are activing a new TT. 
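 * (a newly activated TT also charges TT_HS_OVERHEAD against the parent HS
 * bus, see xhci_check_tt_bw_table() above).
 *
 * Rough illustration of the interval loop below, with made-up numbers: six
 * packets pending at interval 1 give packets_transmitted = 6 >> 2 = 1, so
 * one packet plus overhead is charged against every microframe, while the
 * 6 % 4 = 2 left-over packets are doubled and carried into the interval 2
 * calculation.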
2323 */ 2324 if (virt_dev->tt_info) { 2325 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2326 "Recalculating BW for rootport %u", 2327 virt_dev->real_port); 2328 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2329 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2330 "newly activated TT.\n"); 2331 return -ENOMEM; 2332 } 2333 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2334 "Recalculating BW for TT slot %u port %u", 2335 virt_dev->tt_info->slot_id, 2336 virt_dev->tt_info->ttport); 2337 } else { 2338 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2339 "Recalculating BW for rootport %u", 2340 virt_dev->real_port); 2341 } 2342 2343 /* Add in how much bandwidth will be used for interval zero, or the 2344 * rounded max ESIT payload + number of packets * largest overhead. 2345 */ 2346 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 2347 bw_table->interval_bw[0].num_packets * 2348 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 2349 2350 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 2351 unsigned int bw_added; 2352 unsigned int largest_mps; 2353 unsigned int interval_overhead; 2354 2355 /* 2356 * How many packets could we transmit in this interval? 2357 * If packets didn't fit in the previous interval, we will need 2358 * to transmit that many packets twice within this interval. 2359 */ 2360 packets_remaining = 2 * packets_remaining + 2361 bw_table->interval_bw[i].num_packets; 2362 2363 /* Find the largest max packet size of this or the previous 2364 * interval. 2365 */ 2366 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2367 largest_mps = 0; 2368 else { 2369 struct xhci_virt_ep *virt_ep; 2370 struct list_head *ep_entry; 2371 2372 ep_entry = bw_table->interval_bw[i].endpoints.next; 2373 virt_ep = list_entry(ep_entry, 2374 struct xhci_virt_ep, bw_endpoint_list); 2375 /* Convert to blocks, rounding up */ 2376 largest_mps = DIV_ROUND_UP( 2377 virt_ep->bw_info.max_packet_size, 2378 block_size); 2379 } 2380 if (largest_mps > packet_size) 2381 packet_size = largest_mps; 2382 2383 /* Use the larger overhead of this or the previous interval. */ 2384 interval_overhead = xhci_get_largest_overhead( 2385 &bw_table->interval_bw[i]); 2386 if (interval_overhead > overhead) 2387 overhead = interval_overhead; 2388 2389 /* How many packets can we evenly distribute across 2390 * (1 << (i + 1)) possible scheduling opportunities? 2391 */ 2392 packets_transmitted = packets_remaining >> (i + 1); 2393 2394 /* Add in the bandwidth used for those scheduled packets */ 2395 bw_added = packets_transmitted * (overhead + packet_size); 2396 2397 /* How many packets do we have remaining to transmit? */ 2398 packets_remaining = packets_remaining % (1 << (i + 1)); 2399 2400 /* What largest max packet size should those packets have? */ 2401 /* If we've transmitted all packets, don't carry over the 2402 * largest packet size. 2403 */ 2404 if (packets_remaining == 0) { 2405 packet_size = 0; 2406 overhead = 0; 2407 } else if (packets_transmitted > 0) { 2408 /* Otherwise if we do have remaining packets, and we've 2409 * scheduled some packets in this interval, take the 2410 * largest max packet size from endpoints with this 2411 * interval. 2412 */ 2413 packet_size = largest_mps; 2414 overhead = interval_overhead; 2415 } 2416 /* Otherwise carry over packet_size and overhead from the last 2417 * time we had a remainder. 2418 */ 2419 bw_used += bw_added; 2420 if (bw_used > max_bandwidth) { 2421 xhci_warn(xhci, "Not enough bandwidth. 
" 2422 "Proposed: %u, Max: %u\n", 2423 bw_used, max_bandwidth); 2424 return -ENOMEM; 2425 } 2426 } 2427 /* 2428 * Ok, we know we have some packets left over after even-handedly 2429 * scheduling interval 15. We don't know which microframes they will 2430 * fit into, so we over-schedule and say they will be scheduled every 2431 * microframe. 2432 */ 2433 if (packets_remaining > 0) 2434 bw_used += overhead + packet_size; 2435 2436 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2437 unsigned int port_index = virt_dev->real_port - 1; 2438 2439 /* OK, we're manipulating a HS device attached to a 2440 * root port bandwidth domain. Include the number of active TTs 2441 * in the bandwidth used. 2442 */ 2443 bw_used += TT_HS_OVERHEAD * 2444 xhci->rh_bw[port_index].num_active_tts; 2445 } 2446 2447 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2448 "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2449 "Available: %u " "percent", 2450 bw_used, max_bandwidth, bw_reserved, 2451 (max_bandwidth - bw_used - bw_reserved) * 100 / 2452 max_bandwidth); 2453 2454 bw_used += bw_reserved; 2455 if (bw_used > max_bandwidth) { 2456 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2457 bw_used, max_bandwidth); 2458 return -ENOMEM; 2459 } 2460 2461 bw_table->bw_used = bw_used; 2462 return 0; 2463 } 2464 2465 static bool xhci_is_async_ep(unsigned int ep_type) 2466 { 2467 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2468 ep_type != ISOC_IN_EP && 2469 ep_type != INT_IN_EP); 2470 } 2471 2472 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2473 { 2474 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2475 } 2476 2477 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2478 { 2479 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2480 2481 if (ep_bw->ep_interval == 0) 2482 return SS_OVERHEAD_BURST + 2483 (ep_bw->mult * ep_bw->num_packets * 2484 (SS_OVERHEAD + mps)); 2485 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2486 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2487 1 << ep_bw->ep_interval); 2488 2489 } 2490 2491 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2492 struct xhci_bw_info *ep_bw, 2493 struct xhci_interval_bw_table *bw_table, 2494 struct usb_device *udev, 2495 struct xhci_virt_ep *virt_ep, 2496 struct xhci_tt_bw_info *tt_info) 2497 { 2498 struct xhci_interval_bw *interval_bw; 2499 int normalized_interval; 2500 2501 if (xhci_is_async_ep(ep_bw->type)) 2502 return; 2503 2504 if (udev->speed >= USB_SPEED_SUPER) { 2505 if (xhci_is_sync_in_ep(ep_bw->type)) 2506 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2507 xhci_get_ss_bw_consumed(ep_bw); 2508 else 2509 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2510 xhci_get_ss_bw_consumed(ep_bw); 2511 return; 2512 } 2513 2514 /* SuperSpeed endpoints never get added to intervals in the table, so 2515 * this check is only valid for HS/FS/LS devices. 2516 */ 2517 if (list_empty(&virt_ep->bw_endpoint_list)) 2518 return; 2519 /* For LS/FS devices, we need to translate the interval expressed in 2520 * microframes to frames. 
2521 */ 2522 if (udev->speed == USB_SPEED_HIGH) 2523 normalized_interval = ep_bw->ep_interval; 2524 else 2525 normalized_interval = ep_bw->ep_interval - 3; 2526 2527 if (normalized_interval == 0) 2528 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; 2529 interval_bw = &bw_table->interval_bw[normalized_interval]; 2530 interval_bw->num_packets -= ep_bw->num_packets; 2531 switch (udev->speed) { 2532 case USB_SPEED_LOW: 2533 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; 2534 break; 2535 case USB_SPEED_FULL: 2536 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; 2537 break; 2538 case USB_SPEED_HIGH: 2539 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; 2540 break; 2541 case USB_SPEED_SUPER: 2542 case USB_SPEED_SUPER_PLUS: 2543 case USB_SPEED_UNKNOWN: 2544 case USB_SPEED_WIRELESS: 2545 /* Should never happen because only LS/FS/HS endpoints will get 2546 * added to the endpoint list. 2547 */ 2548 return; 2549 } 2550 if (tt_info) 2551 tt_info->active_eps -= 1; 2552 list_del_init(&virt_ep->bw_endpoint_list); 2553 } 2554 2555 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2556 struct xhci_bw_info *ep_bw, 2557 struct xhci_interval_bw_table *bw_table, 2558 struct usb_device *udev, 2559 struct xhci_virt_ep *virt_ep, 2560 struct xhci_tt_bw_info *tt_info) 2561 { 2562 struct xhci_interval_bw *interval_bw; 2563 struct xhci_virt_ep *smaller_ep; 2564 int normalized_interval; 2565 2566 if (xhci_is_async_ep(ep_bw->type)) 2567 return; 2568 2569 if (udev->speed == USB_SPEED_SUPER) { 2570 if (xhci_is_sync_in_ep(ep_bw->type)) 2571 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2572 xhci_get_ss_bw_consumed(ep_bw); 2573 else 2574 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2575 xhci_get_ss_bw_consumed(ep_bw); 2576 return; 2577 } 2578 2579 /* For LS/FS devices, we need to translate the interval expressed in 2580 * microframes to frames. 2581 */ 2582 if (udev->speed == USB_SPEED_HIGH) 2583 normalized_interval = ep_bw->ep_interval; 2584 else 2585 normalized_interval = ep_bw->ep_interval - 3; 2586 2587 if (normalized_interval == 0) 2588 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2589 interval_bw = &bw_table->interval_bw[normalized_interval]; 2590 interval_bw->num_packets += ep_bw->num_packets; 2591 switch (udev->speed) { 2592 case USB_SPEED_LOW: 2593 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2594 break; 2595 case USB_SPEED_FULL: 2596 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2597 break; 2598 case USB_SPEED_HIGH: 2599 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2600 break; 2601 case USB_SPEED_SUPER: 2602 case USB_SPEED_SUPER_PLUS: 2603 case USB_SPEED_UNKNOWN: 2604 case USB_SPEED_WIRELESS: 2605 /* Should never happen because only LS/FS/HS endpoints will get 2606 * added to the endpoint list. 2607 */ 2608 return; 2609 } 2610 2611 if (tt_info) 2612 tt_info->active_eps += 1; 2613 /* Insert the endpoint into the list, largest max packet size first. */ 2614 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2615 bw_endpoint_list) { 2616 if (ep_bw->max_packet_size >= 2617 smaller_ep->bw_info.max_packet_size) { 2618 /* Add the new ep before the smaller endpoint */ 2619 list_add_tail(&virt_ep->bw_endpoint_list, 2620 &smaller_ep->bw_endpoint_list); 2621 return; 2622 } 2623 } 2624 /* Add the new endpoint at the end of the list. 
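 * Falling through to here means the new endpoint has the smallest max
 * packet size at this interval; keeping the list sorted largest-first lets
 * xhci_check_bw_table() use the head entry as the per-interval worst case.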
*/ 2625 list_add_tail(&virt_ep->bw_endpoint_list, 2626 &interval_bw->endpoints); 2627 } 2628 2629 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2630 struct xhci_virt_device *virt_dev, 2631 int old_active_eps) 2632 { 2633 struct xhci_root_port_bw_info *rh_bw_info; 2634 if (!virt_dev->tt_info) 2635 return; 2636 2637 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2638 if (old_active_eps == 0 && 2639 virt_dev->tt_info->active_eps != 0) { 2640 rh_bw_info->num_active_tts += 1; 2641 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2642 } else if (old_active_eps != 0 && 2643 virt_dev->tt_info->active_eps == 0) { 2644 rh_bw_info->num_active_tts -= 1; 2645 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2646 } 2647 } 2648 2649 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2650 struct xhci_virt_device *virt_dev, 2651 struct xhci_container_ctx *in_ctx) 2652 { 2653 struct xhci_bw_info ep_bw_info[31]; 2654 int i; 2655 struct xhci_input_control_ctx *ctrl_ctx; 2656 int old_active_eps = 0; 2657 2658 if (virt_dev->tt_info) 2659 old_active_eps = virt_dev->tt_info->active_eps; 2660 2661 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2662 if (!ctrl_ctx) { 2663 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2664 __func__); 2665 return -ENOMEM; 2666 } 2667 2668 for (i = 0; i < 31; i++) { 2669 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2670 continue; 2671 2672 /* Make a copy of the BW info in case we need to revert this */ 2673 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2674 sizeof(ep_bw_info[i])); 2675 /* Drop the endpoint from the interval table if the endpoint is 2676 * being dropped or changed. 2677 */ 2678 if (EP_IS_DROPPED(ctrl_ctx, i)) 2679 xhci_drop_ep_from_interval_table(xhci, 2680 &virt_dev->eps[i].bw_info, 2681 virt_dev->bw_table, 2682 virt_dev->udev, 2683 &virt_dev->eps[i], 2684 virt_dev->tt_info); 2685 } 2686 /* Overwrite the information stored in the endpoints' bw_info */ 2687 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2688 for (i = 0; i < 31; i++) { 2689 /* Add any changed or added endpoints to the interval table */ 2690 if (EP_IS_ADDED(ctrl_ctx, i)) 2691 xhci_add_ep_to_interval_table(xhci, 2692 &virt_dev->eps[i].bw_info, 2693 virt_dev->bw_table, 2694 virt_dev->udev, 2695 &virt_dev->eps[i], 2696 virt_dev->tt_info); 2697 } 2698 2699 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2700 /* Ok, this fits in the bandwidth we have. 2701 * Update the number of active TTs. 2702 */ 2703 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2704 return 0; 2705 } 2706 2707 /* We don't have enough bandwidth for this, revert the stored info. */ 2708 for (i = 0; i < 31; i++) { 2709 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2710 continue; 2711 2712 /* Drop the new copies of any added or changed endpoints from 2713 * the interval table. 
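 * Afterwards the saved bw_info is restored and any dropped endpoints are
 * re-added, leaving the interval table exactly as it was before this
 * function ran.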
2714 */ 2715 if (EP_IS_ADDED(ctrl_ctx, i)) { 2716 xhci_drop_ep_from_interval_table(xhci, 2717 &virt_dev->eps[i].bw_info, 2718 virt_dev->bw_table, 2719 virt_dev->udev, 2720 &virt_dev->eps[i], 2721 virt_dev->tt_info); 2722 } 2723 /* Revert the endpoint back to its old information */ 2724 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2725 sizeof(ep_bw_info[i])); 2726 /* Add any changed or dropped endpoints back into the table */ 2727 if (EP_IS_DROPPED(ctrl_ctx, i)) 2728 xhci_add_ep_to_interval_table(xhci, 2729 &virt_dev->eps[i].bw_info, 2730 virt_dev->bw_table, 2731 virt_dev->udev, 2732 &virt_dev->eps[i], 2733 virt_dev->tt_info); 2734 } 2735 return -ENOMEM; 2736 } 2737 2738 2739 /* Issue a configure endpoint command or evaluate context command 2740 * and wait for it to finish. 2741 */ 2742 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2743 struct usb_device *udev, 2744 struct xhci_command *command, 2745 bool ctx_change, bool must_succeed) 2746 { 2747 int ret; 2748 unsigned long flags; 2749 struct xhci_input_control_ctx *ctrl_ctx; 2750 struct xhci_virt_device *virt_dev; 2751 struct xhci_slot_ctx *slot_ctx; 2752 2753 if (!command) 2754 return -EINVAL; 2755 2756 spin_lock_irqsave(&xhci->lock, flags); 2757 2758 if (xhci->xhc_state & XHCI_STATE_DYING) { 2759 spin_unlock_irqrestore(&xhci->lock, flags); 2760 return -ESHUTDOWN; 2761 } 2762 2763 virt_dev = xhci->devs[udev->slot_id]; 2764 2765 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 2766 if (!ctrl_ctx) { 2767 spin_unlock_irqrestore(&xhci->lock, flags); 2768 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2769 __func__); 2770 return -ENOMEM; 2771 } 2772 2773 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2774 xhci_reserve_host_resources(xhci, ctrl_ctx)) { 2775 spin_unlock_irqrestore(&xhci->lock, flags); 2776 xhci_warn(xhci, "Not enough host resources, " 2777 "active endpoint contexts = %u\n", 2778 xhci->num_active_eps); 2779 return -ENOMEM; 2780 } 2781 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2782 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { 2783 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2784 xhci_free_host_resources(xhci, ctrl_ctx); 2785 spin_unlock_irqrestore(&xhci->lock, flags); 2786 xhci_warn(xhci, "Not enough bandwidth\n"); 2787 return -ENOMEM; 2788 } 2789 2790 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 2791 2792 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); 2793 trace_xhci_configure_endpoint(slot_ctx); 2794 2795 if (!ctx_change) 2796 ret = xhci_queue_configure_endpoint(xhci, command, 2797 command->in_ctx->dma, 2798 udev->slot_id, must_succeed); 2799 else 2800 ret = xhci_queue_evaluate_context(xhci, command, 2801 command->in_ctx->dma, 2802 udev->slot_id, must_succeed); 2803 if (ret < 0) { 2804 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2805 xhci_free_host_resources(xhci, ctrl_ctx); 2806 spin_unlock_irqrestore(&xhci->lock, flags); 2807 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2808 "FIXME allocate a new ring segment"); 2809 return -ENOMEM; 2810 } 2811 xhci_ring_cmd_db(xhci); 2812 spin_unlock_irqrestore(&xhci->lock, flags); 2813 2814 /* Wait for the configure endpoint command to complete */ 2815 wait_for_completion(command->completion); 2816 2817 if (!ctx_change) 2818 ret = xhci_configure_endpoint_result(xhci, udev, 2819 &command->status); 2820 else 2821 ret = xhci_evaluate_context_result(xhci, udev, 2822 &command->status); 2823 2824 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2825 spin_lock_irqsave(&xhci->lock, flags); 2826 /* If the command failed, remove the 
reserved resources. 2827 * Otherwise, clean up the estimate to include dropped eps. 2828 */ 2829 if (ret) 2830 xhci_free_host_resources(xhci, ctrl_ctx); 2831 else 2832 xhci_finish_resource_reservation(xhci, ctrl_ctx); 2833 spin_unlock_irqrestore(&xhci->lock, flags); 2834 } 2835 return ret; 2836 } 2837 2838 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, 2839 struct xhci_virt_device *vdev, int i) 2840 { 2841 struct xhci_virt_ep *ep = &vdev->eps[i]; 2842 2843 if (ep->ep_state & EP_HAS_STREAMS) { 2844 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", 2845 xhci_get_endpoint_address(i)); 2846 xhci_free_stream_info(xhci, ep->stream_info); 2847 ep->stream_info = NULL; 2848 ep->ep_state &= ~EP_HAS_STREAMS; 2849 } 2850 } 2851 2852 /* Called after one or more calls to xhci_add_endpoint() or 2853 * xhci_drop_endpoint(). If this call fails, the USB core is expected 2854 * to call xhci_reset_bandwidth(). 2855 * 2856 * Since we are in the middle of changing either configuration or 2857 * installing a new alt setting, the USB core won't allow URBs to be 2858 * enqueued for any endpoint on the old config or interface. Nothing 2859 * else should be touching the xhci->devs[slot_id] structure, so we 2860 * don't need to take the xhci->lock for manipulating that. 2861 */ 2862 static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2863 { 2864 int i; 2865 int ret = 0; 2866 struct xhci_hcd *xhci; 2867 struct xhci_virt_device *virt_dev; 2868 struct xhci_input_control_ctx *ctrl_ctx; 2869 struct xhci_slot_ctx *slot_ctx; 2870 struct xhci_command *command; 2871 2872 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2873 if (ret <= 0) 2874 return ret; 2875 xhci = hcd_to_xhci(hcd); 2876 if ((xhci->xhc_state & XHCI_STATE_DYING) || 2877 (xhci->xhc_state & XHCI_STATE_REMOVING)) 2878 return -ENODEV; 2879 2880 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2881 virt_dev = xhci->devs[udev->slot_id]; 2882 2883 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 2884 if (!command) 2885 return -ENOMEM; 2886 2887 command->in_ctx = virt_dev->in_ctx; 2888 2889 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 2890 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 2891 if (!ctrl_ctx) { 2892 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2893 __func__); 2894 ret = -ENOMEM; 2895 goto command_cleanup; 2896 } 2897 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2898 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 2899 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 2900 2901 /* Don't issue the command if there's no endpoints to update. */ 2902 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 2903 ctrl_ctx->drop_flags == 0) { 2904 ret = 0; 2905 goto command_cleanup; 2906 } 2907 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ 2908 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2909 for (i = 31; i >= 1; i--) { 2910 __le32 le32 = cpu_to_le32(BIT(i)); 2911 2912 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) 2913 || (ctrl_ctx->add_flags & le32) || i == 1) { 2914 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 2915 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); 2916 break; 2917 } 2918 } 2919 2920 ret = xhci_configure_endpoint(xhci, udev, command, 2921 false, false); 2922 if (ret) 2923 /* Callee should call reset_bandwidth() */ 2924 goto command_cleanup; 2925 2926 /* Free any rings that were dropped, but not changed. 
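 * Endpoint index i corresponds to flag bit i + 1 in the add/drop masks,
 * since bit 0 is the slot context flag and bit 1 is EP0.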
*/ 2927 for (i = 1; i < 31; i++) { 2928 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 2929 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { 2930 xhci_free_endpoint_ring(xhci, virt_dev, i); 2931 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 2932 } 2933 } 2934 xhci_zero_in_ctx(xhci, virt_dev); 2935 /* 2936 * Install any rings for completely new endpoints or changed endpoints, 2937 * and free any old rings from changed endpoints. 2938 */ 2939 for (i = 1; i < 31; i++) { 2940 if (!virt_dev->eps[i].new_ring) 2941 continue; 2942 /* Only free the old ring if it exists. 2943 * It may not if this is the first add of an endpoint. 2944 */ 2945 if (virt_dev->eps[i].ring) { 2946 xhci_free_endpoint_ring(xhci, virt_dev, i); 2947 } 2948 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 2949 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 2950 virt_dev->eps[i].new_ring = NULL; 2951 } 2952 command_cleanup: 2953 kfree(command->completion); 2954 kfree(command); 2955 2956 return ret; 2957 } 2958 2959 static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2960 { 2961 struct xhci_hcd *xhci; 2962 struct xhci_virt_device *virt_dev; 2963 int i, ret; 2964 2965 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2966 if (ret <= 0) 2967 return; 2968 xhci = hcd_to_xhci(hcd); 2969 2970 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2971 virt_dev = xhci->devs[udev->slot_id]; 2972 /* Free any rings allocated for added endpoints */ 2973 for (i = 0; i < 31; i++) { 2974 if (virt_dev->eps[i].new_ring) { 2975 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 2976 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 2977 virt_dev->eps[i].new_ring = NULL; 2978 } 2979 } 2980 xhci_zero_in_ctx(xhci, virt_dev); 2981 } 2982 2983 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 2984 struct xhci_container_ctx *in_ctx, 2985 struct xhci_container_ctx *out_ctx, 2986 struct xhci_input_control_ctx *ctrl_ctx, 2987 u32 add_flags, u32 drop_flags) 2988 { 2989 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 2990 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 2991 xhci_slot_copy(xhci, in_ctx, out_ctx); 2992 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2993 } 2994 2995 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 2996 unsigned int slot_id, unsigned int ep_index, 2997 struct xhci_dequeue_state *deq_state) 2998 { 2999 struct xhci_input_control_ctx *ctrl_ctx; 3000 struct xhci_container_ctx *in_ctx; 3001 struct xhci_ep_ctx *ep_ctx; 3002 u32 added_ctxs; 3003 dma_addr_t addr; 3004 3005 in_ctx = xhci->devs[slot_id]->in_ctx; 3006 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 3007 if (!ctrl_ctx) { 3008 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3009 __func__); 3010 return; 3011 } 3012 3013 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 3014 xhci->devs[slot_id]->out_ctx, ep_index); 3015 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 3016 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 3017 deq_state->new_deq_ptr); 3018 if (addr == 0) { 3019 xhci_warn(xhci, "WARN Cannot submit config ep after " 3020 "reset ep command\n"); 3021 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 3022 deq_state->new_deq_seg, 3023 deq_state->new_deq_ptr); 3024 return; 3025 } 3026 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 3027 3028 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 3029 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 3030 xhci->devs[slot_id]->out_ctx, ctrl_ctx, 3031 
added_ctxs, added_ctxs); 3032 } 3033 3034 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, 3035 unsigned int ep_index, unsigned int stream_id, 3036 struct xhci_td *td) 3037 { 3038 struct xhci_dequeue_state deq_state; 3039 3040 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 3041 "Cleaning up stalled endpoint ring"); 3042 /* We need to move the HW's dequeue pointer past this TD, 3043 * or it will attempt to resend it on the next doorbell ring. 3044 */ 3045 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td, 3046 &deq_state); 3047 3048 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) 3049 return; 3050 3051 /* HW with the reset endpoint quirk will use the saved dequeue state to 3052 * issue a configure endpoint command later. 3053 */ 3054 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 3055 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 3056 "Queueing new dequeue state"); 3057 xhci_queue_new_dequeue_state(xhci, slot_id, 3058 ep_index, &deq_state); 3059 } else { 3060 /* Better hope no one uses the input context between now and the 3061 * reset endpoint completion! 3062 * XXX: No idea how this hardware will react when stream rings 3063 * are enabled. 3064 */ 3065 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3066 "Setting up input context for " 3067 "configure endpoint command"); 3068 xhci_setup_input_ctx_for_quirk(xhci, slot_id, 3069 ep_index, &deq_state); 3070 } 3071 } 3072 3073 static void xhci_endpoint_disable(struct usb_hcd *hcd, 3074 struct usb_host_endpoint *host_ep) 3075 { 3076 struct xhci_hcd *xhci; 3077 struct xhci_virt_device *vdev; 3078 struct xhci_virt_ep *ep; 3079 struct usb_device *udev; 3080 unsigned long flags; 3081 unsigned int ep_index; 3082 3083 xhci = hcd_to_xhci(hcd); 3084 rescan: 3085 spin_lock_irqsave(&xhci->lock, flags); 3086 3087 udev = (struct usb_device *)host_ep->hcpriv; 3088 if (!udev || !udev->slot_id) 3089 goto done; 3090 3091 vdev = xhci->devs[udev->slot_id]; 3092 if (!vdev) 3093 goto done; 3094 3095 ep_index = xhci_get_endpoint_index(&host_ep->desc); 3096 ep = &vdev->eps[ep_index]; 3097 if (!ep) 3098 goto done; 3099 3100 /* wait for hub_tt_work to finish clearing hub TT */ 3101 if (ep->ep_state & EP_CLEARING_TT) { 3102 spin_unlock_irqrestore(&xhci->lock, flags); 3103 schedule_timeout_uninterruptible(1); 3104 goto rescan; 3105 } 3106 3107 if (ep->ep_state) 3108 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", 3109 ep->ep_state); 3110 done: 3111 host_ep->hcpriv = NULL; 3112 spin_unlock_irqrestore(&xhci->lock, flags); 3113 } 3114 3115 /* 3116 * Called after usb core issues a clear halt control message. 3117 * The host side of the halt should already be cleared by a reset endpoint 3118 * command issued when the STALL event was received. 3119 * 3120 * The reset endpoint command may only be issued to endpoints in the halted 3121 * state. For software that wishes to reset the data toggle or sequence number 3122 * of an endpoint that isn't in the halted state this function will issue a 3123 * configure endpoint command with the Drop and Add bits set for the target 3124 * endpoint. Refer to the additional note in xhci spcification section 4.6.8. 
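 *
 * The code below therefore first stops the endpoint, so the xHC flushes its
 * cached endpoint state to the output context, and then issues a Configure
 * Endpoint command with both the Drop and Add flag set for this endpoint,
 * which reinitializes the endpoint context and clears the data toggle and
 * sequence number.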
3125 */ 3126 3127 static void xhci_endpoint_reset(struct usb_hcd *hcd, 3128 struct usb_host_endpoint *host_ep) 3129 { 3130 struct xhci_hcd *xhci; 3131 struct usb_device *udev; 3132 struct xhci_virt_device *vdev; 3133 struct xhci_virt_ep *ep; 3134 struct xhci_input_control_ctx *ctrl_ctx; 3135 struct xhci_command *stop_cmd, *cfg_cmd; 3136 unsigned int ep_index; 3137 unsigned long flags; 3138 u32 ep_flag; 3139 int err; 3140 3141 xhci = hcd_to_xhci(hcd); 3142 if (!host_ep->hcpriv) 3143 return; 3144 udev = (struct usb_device *) host_ep->hcpriv; 3145 vdev = xhci->devs[udev->slot_id]; 3146 3147 /* 3148 * vdev may be lost due to xHC restore error and re-initialization 3149 * during S3/S4 resume. A new vdev will be allocated later by 3150 * xhci_discover_or_reset_device() 3151 */ 3152 if (!udev->slot_id || !vdev) 3153 return; 3154 ep_index = xhci_get_endpoint_index(&host_ep->desc); 3155 ep = &vdev->eps[ep_index]; 3156 if (!ep) 3157 return; 3158 3159 /* Bail out if toggle is already being cleared by a endpoint reset */ 3160 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { 3161 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; 3162 return; 3163 } 3164 /* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */ 3165 if (usb_endpoint_xfer_control(&host_ep->desc) || 3166 usb_endpoint_xfer_isoc(&host_ep->desc)) 3167 return; 3168 3169 ep_flag = xhci_get_endpoint_flag(&host_ep->desc); 3170 3171 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) 3172 return; 3173 3174 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); 3175 if (!stop_cmd) 3176 return; 3177 3178 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); 3179 if (!cfg_cmd) 3180 goto cleanup; 3181 3182 spin_lock_irqsave(&xhci->lock, flags); 3183 3184 /* block queuing new trbs and ringing ep doorbell */ 3185 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE; 3186 3187 /* 3188 * Make sure endpoint ring is empty before resetting the toggle/seq. 3189 * Driver is required to synchronously cancel all transfer request. 
3190 * Stop the endpoint to force xHC to update the output context 3191 */ 3192 3193 if (!list_empty(&ep->ring->td_list)) { 3194 dev_err(&udev->dev, "EP not empty, refuse reset\n"); 3195 spin_unlock_irqrestore(&xhci->lock, flags); 3196 xhci_free_command(xhci, cfg_cmd); 3197 goto cleanup; 3198 } 3199 3200 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, 3201 ep_index, 0); 3202 if (err < 0) { 3203 spin_unlock_irqrestore(&xhci->lock, flags); 3204 xhci_free_command(xhci, cfg_cmd); 3205 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", 3206 __func__, err); 3207 goto cleanup; 3208 } 3209 3210 xhci_ring_cmd_db(xhci); 3211 spin_unlock_irqrestore(&xhci->lock, flags); 3212 3213 wait_for_completion(stop_cmd->completion); 3214 3215 spin_lock_irqsave(&xhci->lock, flags); 3216 3217 /* config ep command clears toggle if add and drop ep flags are set */ 3218 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); 3219 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, 3220 ctrl_ctx, ep_flag, ep_flag); 3221 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); 3222 3223 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, 3224 udev->slot_id, false); 3225 if (err < 0) { 3226 spin_unlock_irqrestore(&xhci->lock, flags); 3227 xhci_free_command(xhci, cfg_cmd); 3228 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", 3229 __func__, err); 3230 goto cleanup; 3231 } 3232 3233 xhci_ring_cmd_db(xhci); 3234 spin_unlock_irqrestore(&xhci->lock, flags); 3235 3236 wait_for_completion(cfg_cmd->completion); 3237 3238 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; 3239 xhci_free_command(xhci, cfg_cmd); 3240 cleanup: 3241 xhci_free_command(xhci, stop_cmd); 3242 } 3243 3244 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 3245 struct usb_device *udev, struct usb_host_endpoint *ep, 3246 unsigned int slot_id) 3247 { 3248 int ret; 3249 unsigned int ep_index; 3250 unsigned int ep_state; 3251 3252 if (!ep) 3253 return -EINVAL; 3254 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 3255 if (ret <= 0) 3256 return -EINVAL; 3257 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { 3258 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" 3259 " descriptor for ep 0x%x does not support streams\n", 3260 ep->desc.bEndpointAddress); 3261 return -EINVAL; 3262 } 3263 3264 ep_index = xhci_get_endpoint_index(&ep->desc); 3265 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3266 if (ep_state & EP_HAS_STREAMS || 3267 ep_state & EP_GETTING_STREAMS) { 3268 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " 3269 "already has streams set up.\n", 3270 ep->desc.bEndpointAddress); 3271 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " 3272 "dynamic stream context array reallocation.\n"); 3273 return -EINVAL; 3274 } 3275 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { 3276 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " 3277 "endpoint 0x%x; URBs are pending.\n", 3278 ep->desc.bEndpointAddress); 3279 return -EINVAL; 3280 } 3281 return 0; 3282 } 3283 3284 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, 3285 unsigned int *num_streams, unsigned int *num_stream_ctxs) 3286 { 3287 unsigned int max_streams; 3288 3289 /* The stream context array size must be a power of two */ 3290 *num_stream_ctxs = roundup_pow_of_two(*num_streams); 3291 /* 3292 * Find out how many primary stream array entries the host controller 3293 * supports. 
Later we may use secondary stream arrays (similar to 2nd 3294 * level page entries), but that's an optional feature for xHCI host 3295 * controllers. xHCs must support at least 4 stream IDs. 3296 */ 3297 max_streams = HCC_MAX_PSA(xhci->hcc_params); 3298 if (*num_stream_ctxs > max_streams) { 3299 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", 3300 max_streams); 3301 *num_stream_ctxs = max_streams; 3302 *num_streams = max_streams; 3303 } 3304 } 3305 3306 /* Returns an error code if one of the endpoint already has streams. 3307 * This does not change any data structures, it only checks and gathers 3308 * information. 3309 */ 3310 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, 3311 struct usb_device *udev, 3312 struct usb_host_endpoint **eps, unsigned int num_eps, 3313 unsigned int *num_streams, u32 *changed_ep_bitmask) 3314 { 3315 unsigned int max_streams; 3316 unsigned int endpoint_flag; 3317 int i; 3318 int ret; 3319 3320 for (i = 0; i < num_eps; i++) { 3321 ret = xhci_check_streams_endpoint(xhci, udev, 3322 eps[i], udev->slot_id); 3323 if (ret < 0) 3324 return ret; 3325 3326 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); 3327 if (max_streams < (*num_streams - 1)) { 3328 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", 3329 eps[i]->desc.bEndpointAddress, 3330 max_streams); 3331 *num_streams = max_streams+1; 3332 } 3333 3334 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); 3335 if (*changed_ep_bitmask & endpoint_flag) 3336 return -EINVAL; 3337 *changed_ep_bitmask |= endpoint_flag; 3338 } 3339 return 0; 3340 } 3341 3342 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, 3343 struct usb_device *udev, 3344 struct usb_host_endpoint **eps, unsigned int num_eps) 3345 { 3346 u32 changed_ep_bitmask = 0; 3347 unsigned int slot_id; 3348 unsigned int ep_index; 3349 unsigned int ep_state; 3350 int i; 3351 3352 slot_id = udev->slot_id; 3353 if (!xhci->devs[slot_id]) 3354 return 0; 3355 3356 for (i = 0; i < num_eps; i++) { 3357 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3358 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3359 /* Are streams already being freed for the endpoint? */ 3360 if (ep_state & EP_GETTING_NO_STREAMS) { 3361 xhci_warn(xhci, "WARN Can't disable streams for " 3362 "endpoint 0x%x, " 3363 "streams are being disabled already\n", 3364 eps[i]->desc.bEndpointAddress); 3365 return 0; 3366 } 3367 /* Are there actually any streams to free? */ 3368 if (!(ep_state & EP_HAS_STREAMS) && 3369 !(ep_state & EP_GETTING_STREAMS)) { 3370 xhci_warn(xhci, "WARN Can't disable streams for " 3371 "endpoint 0x%x, " 3372 "streams are already disabled!\n", 3373 eps[i]->desc.bEndpointAddress); 3374 xhci_warn(xhci, "WARN xhci_free_streams() called " 3375 "with non-streams endpoint\n"); 3376 return 0; 3377 } 3378 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); 3379 } 3380 return changed_ep_bitmask; 3381 } 3382 3383 /* 3384 * The USB device drivers use this function (through the HCD interface in USB 3385 * core) to prepare a set of bulk endpoints to use streams. Streams are used to 3386 * coordinate mass storage command queueing across multiple endpoints (basically 3387 * a stream ID == a task ID). 3388 * 3389 * Setting up streams involves allocating the same size stream context array 3390 * for each endpoint and issuing a configure endpoint command for all endpoints. 3391 * 3392 * Don't allow the call to succeed if one endpoint only supports one stream 3393 * (which means it doesn't support streams at all). 
3394 * 3395 * Drivers may get less stream IDs than they asked for, if the host controller 3396 * hardware or endpoints claim they can't support the number of requested 3397 * stream IDs. 3398 */ 3399 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 3400 struct usb_host_endpoint **eps, unsigned int num_eps, 3401 unsigned int num_streams, gfp_t mem_flags) 3402 { 3403 int i, ret; 3404 struct xhci_hcd *xhci; 3405 struct xhci_virt_device *vdev; 3406 struct xhci_command *config_cmd; 3407 struct xhci_input_control_ctx *ctrl_ctx; 3408 unsigned int ep_index; 3409 unsigned int num_stream_ctxs; 3410 unsigned int max_packet; 3411 unsigned long flags; 3412 u32 changed_ep_bitmask = 0; 3413 3414 if (!eps) 3415 return -EINVAL; 3416 3417 /* Add one to the number of streams requested to account for 3418 * stream 0 that is reserved for xHCI usage. 3419 */ 3420 num_streams += 1; 3421 xhci = hcd_to_xhci(hcd); 3422 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 3423 num_streams); 3424 3425 /* MaxPSASize value 0 (2 streams) means streams are not supported */ 3426 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || 3427 HCC_MAX_PSA(xhci->hcc_params) < 4) { 3428 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); 3429 return -ENOSYS; 3430 } 3431 3432 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); 3433 if (!config_cmd) 3434 return -ENOMEM; 3435 3436 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 3437 if (!ctrl_ctx) { 3438 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3439 __func__); 3440 xhci_free_command(xhci, config_cmd); 3441 return -ENOMEM; 3442 } 3443 3444 /* Check to make sure all endpoints are not already configured for 3445 * streams. While we're at it, find the maximum number of streams that 3446 * all the endpoints will support and check for duplicate endpoints. 3447 */ 3448 spin_lock_irqsave(&xhci->lock, flags); 3449 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 3450 num_eps, &num_streams, &changed_ep_bitmask); 3451 if (ret < 0) { 3452 xhci_free_command(xhci, config_cmd); 3453 spin_unlock_irqrestore(&xhci->lock, flags); 3454 return ret; 3455 } 3456 if (num_streams <= 1) { 3457 xhci_warn(xhci, "WARN: endpoints can't handle " 3458 "more than one stream.\n"); 3459 xhci_free_command(xhci, config_cmd); 3460 spin_unlock_irqrestore(&xhci->lock, flags); 3461 return -EINVAL; 3462 } 3463 vdev = xhci->devs[udev->slot_id]; 3464 /* Mark each endpoint as being in transition, so 3465 * xhci_urb_enqueue() will reject all URBs. 3466 */ 3467 for (i = 0; i < num_eps; i++) { 3468 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3469 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 3470 } 3471 spin_unlock_irqrestore(&xhci->lock, flags); 3472 3473 /* Setup internal data structures and allocate HW data structures for 3474 * streams (but don't install the HW structures in the input context 3475 * until we're sure all memory allocation succeeded). 
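 *
 * The first step below just sizes the stream context array. Illustrative
 * numbers: a driver that asked for 15 streams is at 16 here (stream 0 was
 * added above), which rounds up to a 16-entry stream context array; if
 * HCC_MAX_PSA() reported only 8 entries, both values would be capped at 8
 * and the caller would end up with 7 usable stream IDs.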
3476 */ 3477 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 3478 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 3479 num_stream_ctxs, num_streams); 3480 3481 for (i = 0; i < num_eps; i++) { 3482 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3483 max_packet = usb_endpoint_maxp(&eps[i]->desc); 3484 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 3485 num_stream_ctxs, 3486 num_streams, 3487 max_packet, mem_flags); 3488 if (!vdev->eps[ep_index].stream_info) 3489 goto cleanup; 3490 /* Set maxPstreams in endpoint context and update deq ptr to 3491 * point to stream context array. FIXME 3492 */ 3493 } 3494 3495 /* Set up the input context for a configure endpoint command. */ 3496 for (i = 0; i < num_eps; i++) { 3497 struct xhci_ep_ctx *ep_ctx; 3498 3499 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3500 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 3501 3502 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 3503 vdev->out_ctx, ep_index); 3504 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 3505 vdev->eps[ep_index].stream_info); 3506 } 3507 /* Tell the HW to drop its old copy of the endpoint context info 3508 * and add the updated copy from the input context. 3509 */ 3510 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 3511 vdev->out_ctx, ctrl_ctx, 3512 changed_ep_bitmask, changed_ep_bitmask); 3513 3514 /* Issue and wait for the configure endpoint command */ 3515 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 3516 false, false); 3517 3518 /* xHC rejected the configure endpoint command for some reason, so we 3519 * leave the old ring intact and free our internal streams data 3520 * structure. 3521 */ 3522 if (ret < 0) 3523 goto cleanup; 3524 3525 spin_lock_irqsave(&xhci->lock, flags); 3526 for (i = 0; i < num_eps; i++) { 3527 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3528 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3529 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 3530 udev->slot_id, ep_index); 3531 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 3532 } 3533 xhci_free_command(xhci, config_cmd); 3534 spin_unlock_irqrestore(&xhci->lock, flags); 3535 3536 /* Subtract 1 for stream 0, which drivers can't use */ 3537 return num_streams - 1; 3538 3539 cleanup: 3540 /* If it didn't work, free the streams! */ 3541 for (i = 0; i < num_eps; i++) { 3542 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3543 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3544 vdev->eps[ep_index].stream_info = NULL; 3545 /* FIXME Unset maxPstreams in endpoint context and 3546 * update deq ptr to point to normal string ring. 3547 */ 3548 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3549 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3550 xhci_endpoint_zero(xhci, vdev, eps[i]); 3551 } 3552 xhci_free_command(xhci, config_cmd); 3553 return -ENOMEM; 3554 } 3555 3556 /* Transition the endpoint from using streams to being a "normal" endpoint 3557 * without streams. 3558 * 3559 * Modify the endpoint context state, submit a configure endpoint command, 3560 * and free all endpoint rings for streams if that completes successfully. 
3561 */ 3562 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, 3563 struct usb_host_endpoint **eps, unsigned int num_eps, 3564 gfp_t mem_flags) 3565 { 3566 int i, ret; 3567 struct xhci_hcd *xhci; 3568 struct xhci_virt_device *vdev; 3569 struct xhci_command *command; 3570 struct xhci_input_control_ctx *ctrl_ctx; 3571 unsigned int ep_index; 3572 unsigned long flags; 3573 u32 changed_ep_bitmask; 3574 3575 xhci = hcd_to_xhci(hcd); 3576 vdev = xhci->devs[udev->slot_id]; 3577 3578 /* Set up a configure endpoint command to remove the streams rings */ 3579 spin_lock_irqsave(&xhci->lock, flags); 3580 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, 3581 udev, eps, num_eps); 3582 if (changed_ep_bitmask == 0) { 3583 spin_unlock_irqrestore(&xhci->lock, flags); 3584 return -EINVAL; 3585 } 3586 3587 /* Use the xhci_command structure from the first endpoint. We may have 3588 * allocated too many, but the driver may call xhci_free_streams() for 3589 * each endpoint it grouped into one call to xhci_alloc_streams(). 3590 */ 3591 ep_index = xhci_get_endpoint_index(&eps[0]->desc); 3592 command = vdev->eps[ep_index].stream_info->free_streams_command; 3593 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 3594 if (!ctrl_ctx) { 3595 spin_unlock_irqrestore(&xhci->lock, flags); 3596 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3597 __func__); 3598 return -EINVAL; 3599 } 3600 3601 for (i = 0; i < num_eps; i++) { 3602 struct xhci_ep_ctx *ep_ctx; 3603 3604 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3605 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 3606 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= 3607 EP_GETTING_NO_STREAMS; 3608 3609 xhci_endpoint_copy(xhci, command->in_ctx, 3610 vdev->out_ctx, ep_index); 3611 xhci_setup_no_streams_ep_input_ctx(ep_ctx, 3612 &vdev->eps[ep_index]); 3613 } 3614 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, 3615 vdev->out_ctx, ctrl_ctx, 3616 changed_ep_bitmask, changed_ep_bitmask); 3617 spin_unlock_irqrestore(&xhci->lock, flags); 3618 3619 /* Issue and wait for the configure endpoint command, 3620 * which must succeed. 3621 */ 3622 ret = xhci_configure_endpoint(xhci, udev, command, 3623 false, true); 3624 3625 /* xHC rejected the configure endpoint command for some reason, so we 3626 * leave the streams rings intact. 3627 */ 3628 if (ret < 0) 3629 return ret; 3630 3631 spin_lock_irqsave(&xhci->lock, flags); 3632 for (i = 0; i < num_eps; i++) { 3633 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3634 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3635 vdev->eps[ep_index].stream_info = NULL; 3636 /* FIXME Unset maxPstreams in endpoint context and 3637 * update deq ptr to point to normal string ring. 3638 */ 3639 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; 3640 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3641 } 3642 spin_unlock_irqrestore(&xhci->lock, flags); 3643 3644 return 0; 3645 } 3646 3647 /* 3648 * Deletes endpoint resources for endpoints that were active before a Reset 3649 * Device command, or a Disable Slot command. The Reset Device command leaves 3650 * the control endpoint intact, whereas the Disable Slot command deletes it. 3651 * 3652 * Must be called with xhci->lock held. 3653 */ 3654 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, 3655 struct xhci_virt_device *virt_dev, bool drop_control_ep) 3656 { 3657 int i; 3658 unsigned int num_dropped_eps = 0; 3659 unsigned int drop_flags = 0; 3660 3661 for (i = (drop_control_ep ? 
0 : 1); i < 31; i++) { 3662 if (virt_dev->eps[i].ring) { 3663 drop_flags |= 1 << i; 3664 num_dropped_eps++; 3665 } 3666 } 3667 xhci->num_active_eps -= num_dropped_eps; 3668 if (num_dropped_eps) 3669 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3670 "Dropped %u ep ctxs, flags = 0x%x, " 3671 "%u now active.", 3672 num_dropped_eps, drop_flags, 3673 xhci->num_active_eps); 3674 } 3675 3676 /* 3677 * This submits a Reset Device Command, which will set the device state to 0, 3678 * set the device address to 0, and disable all the endpoints except the default 3679 * control endpoint. The USB core should come back and call 3680 * xhci_address_device(), and then re-set up the configuration. If this is 3681 * called because of a usb_reset_and_verify_device(), then the old alternate 3682 * settings will be re-installed through the normal bandwidth allocation 3683 * functions. 3684 * 3685 * Wait for the Reset Device command to finish. Remove all structures 3686 * associated with the endpoints that were disabled. Clear the input device 3687 * structure? Reset the control endpoint 0 max packet size? 3688 * 3689 * If the virt_dev to be reset does not exist or does not match the udev, 3690 * it means the device is lost, possibly due to the xHC restore error and 3691 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to 3692 * re-allocate the device. 3693 */ 3694 static int xhci_discover_or_reset_device(struct usb_hcd *hcd, 3695 struct usb_device *udev) 3696 { 3697 int ret, i; 3698 unsigned long flags; 3699 struct xhci_hcd *xhci; 3700 unsigned int slot_id; 3701 struct xhci_virt_device *virt_dev; 3702 struct xhci_command *reset_device_cmd; 3703 struct xhci_slot_ctx *slot_ctx; 3704 int old_active_eps = 0; 3705 3706 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 3707 if (ret <= 0) 3708 return ret; 3709 xhci = hcd_to_xhci(hcd); 3710 slot_id = udev->slot_id; 3711 virt_dev = xhci->devs[slot_id]; 3712 if (!virt_dev) { 3713 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3714 "not exist. Re-allocate the device\n", slot_id); 3715 ret = xhci_alloc_dev(hcd, udev); 3716 if (ret == 1) 3717 return 0; 3718 else 3719 return -EINVAL; 3720 } 3721 3722 if (virt_dev->tt_info) 3723 old_active_eps = virt_dev->tt_info->active_eps; 3724 3725 if (virt_dev->udev != udev) { 3726 /* If the virt_dev and the udev does not match, this virt_dev 3727 * may belong to another udev. 3728 * Re-allocate the device. 3729 */ 3730 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3731 "not match the udev. Re-allocate the device\n", 3732 slot_id); 3733 ret = xhci_alloc_dev(hcd, udev); 3734 if (ret == 1) 3735 return 0; 3736 else 3737 return -EINVAL; 3738 } 3739 3740 /* If device is not setup, there is no point in resetting it */ 3741 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3742 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3743 SLOT_STATE_DISABLED) 3744 return 0; 3745 3746 trace_xhci_discover_or_reset_device(slot_ctx); 3747 3748 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 3749 /* Allocate the command structure that holds the struct completion. 3750 * Assume we're in process context, since the normal device reset 3751 * process has to wait for the device anyway. Storage devices are 3752 * reset as part of error handling, so use GFP_NOIO instead of 3753 * GFP_KERNEL. 
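 * Once queued, the command is waited on with wait_for_completion();
 * COMP_COMMAND_ABORTED / COMP_COMMAND_RING_STOPPED are reported as -ETIME,
 * while a slot that was never enabled or is in the wrong state is treated
 * as a harmless no-op rather than an error.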
3754 */ 3755 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); 3756 if (!reset_device_cmd) { 3757 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3758 return -ENOMEM; 3759 } 3760 3761 /* Attempt to submit the Reset Device command to the command ring */ 3762 spin_lock_irqsave(&xhci->lock, flags); 3763 3764 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); 3765 if (ret) { 3766 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3767 spin_unlock_irqrestore(&xhci->lock, flags); 3768 goto command_cleanup; 3769 } 3770 xhci_ring_cmd_db(xhci); 3771 spin_unlock_irqrestore(&xhci->lock, flags); 3772 3773 /* Wait for the Reset Device command to finish */ 3774 wait_for_completion(reset_device_cmd->completion); 3775 3776 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3777 * unless we tried to reset a slot ID that wasn't enabled, 3778 * or the device wasn't in the addressed or configured state. 3779 */ 3780 ret = reset_device_cmd->status; 3781 switch (ret) { 3782 case COMP_COMMAND_ABORTED: 3783 case COMP_COMMAND_RING_STOPPED: 3784 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3785 ret = -ETIME; 3786 goto command_cleanup; 3787 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ 3788 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ 3789 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", 3790 slot_id, 3791 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3792 xhci_dbg(xhci, "Not freeing device rings.\n"); 3793 /* Don't treat this as an error. May change my mind later. */ 3794 ret = 0; 3795 goto command_cleanup; 3796 case COMP_SUCCESS: 3797 xhci_dbg(xhci, "Successful reset device command.\n"); 3798 break; 3799 default: 3800 if (xhci_is_vendor_info_code(xhci, ret)) 3801 break; 3802 xhci_warn(xhci, "Unknown completion code %u for " 3803 "reset device command.\n", ret); 3804 ret = -EINVAL; 3805 goto command_cleanup; 3806 } 3807 3808 /* Free up host controller endpoint resources */ 3809 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3810 spin_lock_irqsave(&xhci->lock, flags); 3811 /* Don't delete the default control endpoint resources */ 3812 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3813 spin_unlock_irqrestore(&xhci->lock, flags); 3814 } 3815 3816 /* Everything but endpoint 0 is disabled, so free the rings. 
*/ 3817 for (i = 1; i < 31; i++) { 3818 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3819 3820 if (ep->ep_state & EP_HAS_STREAMS) { 3821 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", 3822 xhci_get_endpoint_address(i)); 3823 xhci_free_stream_info(xhci, ep->stream_info); 3824 ep->stream_info = NULL; 3825 ep->ep_state &= ~EP_HAS_STREAMS; 3826 } 3827 3828 if (ep->ring) { 3829 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 3830 xhci_free_endpoint_ring(xhci, virt_dev, i); 3831 } 3832 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3833 xhci_drop_ep_from_interval_table(xhci, 3834 &virt_dev->eps[i].bw_info, 3835 virt_dev->bw_table, 3836 udev, 3837 &virt_dev->eps[i], 3838 virt_dev->tt_info); 3839 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3840 } 3841 /* If necessary, update the number of active TTs on this root port */ 3842 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3843 virt_dev->flags = 0; 3844 ret = 0; 3845 3846 command_cleanup: 3847 xhci_free_command(xhci, reset_device_cmd); 3848 return ret; 3849 } 3850 3851 /* 3852 * At this point, the struct usb_device is about to go away, the device has 3853 * disconnected, and all traffic has been stopped and the endpoints have been 3854 * disabled. Free any HC data structures associated with that device. 3855 */ 3856 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3857 { 3858 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3859 struct xhci_virt_device *virt_dev; 3860 struct xhci_slot_ctx *slot_ctx; 3861 int i, ret; 3862 3863 #ifndef CONFIG_USB_DEFAULT_PERSIST 3864 /* 3865 * We called pm_runtime_get_noresume when the device was attached. 3866 * Decrement the counter here to allow controller to runtime suspend 3867 * if no devices remain. 3868 */ 3869 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3870 pm_runtime_put_noidle(hcd->self.controller); 3871 #endif 3872 3873 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3874 /* If the host is halted due to driver unload, we still need to free the 3875 * device. 3876 */ 3877 if (ret <= 0 && ret != -ENODEV) 3878 return; 3879 3880 virt_dev = xhci->devs[udev->slot_id]; 3881 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3882 trace_xhci_free_dev(slot_ctx); 3883 3884 /* Stop any wayward timer functions (which may grab the lock) */ 3885 for (i = 0; i < 31; i++) { 3886 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; 3887 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3888 } 3889 virt_dev->udev = NULL; 3890 ret = xhci_disable_slot(xhci, udev->slot_id); 3891 if (ret) 3892 xhci_free_virt_device(xhci, udev->slot_id); 3893 } 3894 3895 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) 3896 { 3897 struct xhci_command *command; 3898 unsigned long flags; 3899 u32 state; 3900 int ret = 0; 3901 3902 command = xhci_alloc_command(xhci, false, GFP_KERNEL); 3903 if (!command) 3904 return -ENOMEM; 3905 3906 xhci_debugfs_remove_slot(xhci, slot_id); 3907 3908 spin_lock_irqsave(&xhci->lock, flags); 3909 /* Don't disable the slot if the host controller is dead. 
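 * A dead host shows up either as an all-ones status read (controller
 * removed) or as the DYING/HALTED state flags; in that case the Disable
 * Slot command is skipped and -ENODEV is returned.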
*/ 3910 state = readl(&xhci->op_regs->status); 3911 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3912 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3913 spin_unlock_irqrestore(&xhci->lock, flags); 3914 kfree(command); 3915 return -ENODEV; 3916 } 3917 3918 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 3919 slot_id); 3920 if (ret) { 3921 spin_unlock_irqrestore(&xhci->lock, flags); 3922 kfree(command); 3923 return ret; 3924 } 3925 xhci_ring_cmd_db(xhci); 3926 spin_unlock_irqrestore(&xhci->lock, flags); 3927 return ret; 3928 } 3929 3930 /* 3931 * Checks if we have enough host controller resources for the default control 3932 * endpoint. 3933 * 3934 * Must be called with xhci->lock held. 3935 */ 3936 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3937 { 3938 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3939 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3940 "Not enough ep ctxs: " 3941 "%u active, need to add 1, limit is %u.", 3942 xhci->num_active_eps, xhci->limit_active_eps); 3943 return -ENOMEM; 3944 } 3945 xhci->num_active_eps += 1; 3946 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3947 "Adding 1 ep ctx, %u now active.", 3948 xhci->num_active_eps); 3949 return 0; 3950 } 3951 3952 3953 /* 3954 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3955 * timed out, or allocating memory failed. Returns 1 on success. 3956 */ 3957 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3958 { 3959 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3960 struct xhci_virt_device *vdev; 3961 struct xhci_slot_ctx *slot_ctx; 3962 unsigned long flags; 3963 int ret, slot_id; 3964 struct xhci_command *command; 3965 3966 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 3967 if (!command) 3968 return 0; 3969 3970 spin_lock_irqsave(&xhci->lock, flags); 3971 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 3972 if (ret) { 3973 spin_unlock_irqrestore(&xhci->lock, flags); 3974 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3975 xhci_free_command(xhci, command); 3976 return 0; 3977 } 3978 xhci_ring_cmd_db(xhci); 3979 spin_unlock_irqrestore(&xhci->lock, flags); 3980 3981 wait_for_completion(command->completion); 3982 slot_id = command->slot_id; 3983 3984 if (!slot_id || command->status != COMP_SUCCESS) { 3985 xhci_err(xhci, "Error while assigning device slot ID\n"); 3986 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 3987 HCS_MAX_SLOTS( 3988 readl(&xhci->cap_regs->hcs_params1))); 3989 xhci_free_command(xhci, command); 3990 return 0; 3991 } 3992 3993 xhci_free_command(xhci, command); 3994 3995 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3996 spin_lock_irqsave(&xhci->lock, flags); 3997 ret = xhci_reserve_host_control_ep_resources(xhci); 3998 if (ret) { 3999 spin_unlock_irqrestore(&xhci->lock, flags); 4000 xhci_warn(xhci, "Not enough host resources, " 4001 "active endpoint contexts = %u\n", 4002 xhci->num_active_eps); 4003 goto disable_slot; 4004 } 4005 spin_unlock_irqrestore(&xhci->lock, flags); 4006 } 4007 /* Use GFP_NOIO, since this function can be called from 4008 * xhci_discover_or_reset_device(), which may be called as part of 4009 * mass storage driver error handling. 
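 * If allocating the virt device fails, the disable_slot path below hands
 * the just-enabled slot back to the controller.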
4010 */ 4011 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { 4012 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 4013 goto disable_slot; 4014 } 4015 vdev = xhci->devs[slot_id]; 4016 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); 4017 trace_xhci_alloc_dev(slot_ctx); 4018 4019 udev->slot_id = slot_id; 4020 4021 xhci_debugfs_create_slot(xhci, slot_id); 4022 4023 #ifndef CONFIG_USB_DEFAULT_PERSIST 4024 /* 4025 * If resetting upon resume, we can't put the controller into runtime 4026 * suspend if there is a device attached. 4027 */ 4028 if (xhci->quirks & XHCI_RESET_ON_RESUME) 4029 pm_runtime_get_noresume(hcd->self.controller); 4030 #endif 4031 4032 /* Is this a LS or FS device under a HS hub? */ 4033 /* Hub or peripherial? */ 4034 return 1; 4035 4036 disable_slot: 4037 ret = xhci_disable_slot(xhci, udev->slot_id); 4038 if (ret) 4039 xhci_free_virt_device(xhci, udev->slot_id); 4040 4041 return 0; 4042 } 4043 4044 /* 4045 * Issue an Address Device command and optionally send a corresponding 4046 * SetAddress request to the device. 4047 */ 4048 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, 4049 enum xhci_setup_dev setup) 4050 { 4051 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; 4052 unsigned long flags; 4053 struct xhci_virt_device *virt_dev; 4054 int ret = 0; 4055 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4056 struct xhci_slot_ctx *slot_ctx; 4057 struct xhci_input_control_ctx *ctrl_ctx; 4058 u64 temp_64; 4059 struct xhci_command *command = NULL; 4060 4061 mutex_lock(&xhci->mutex); 4062 4063 if (xhci->xhc_state) { /* dying, removing or halted */ 4064 ret = -ESHUTDOWN; 4065 goto out; 4066 } 4067 4068 if (!udev->slot_id) { 4069 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4070 "Bad Slot ID %d", udev->slot_id); 4071 ret = -EINVAL; 4072 goto out; 4073 } 4074 4075 virt_dev = xhci->devs[udev->slot_id]; 4076 4077 if (WARN_ON(!virt_dev)) { 4078 /* 4079 * In plug/unplug torture test with an NEC controller, 4080 * a zero-dereference was observed once due to virt_dev = 0. 4081 * Print useful debug rather than crash if it is observed again! 4082 */ 4083 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 4084 udev->slot_id); 4085 ret = -EINVAL; 4086 goto out; 4087 } 4088 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 4089 trace_xhci_setup_device_slot(slot_ctx); 4090 4091 if (setup == SETUP_CONTEXT_ONLY) { 4092 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 4093 SLOT_STATE_DEFAULT) { 4094 xhci_dbg(xhci, "Slot already in default state\n"); 4095 goto out; 4096 } 4097 } 4098 4099 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 4100 if (!command) { 4101 ret = -ENOMEM; 4102 goto out; 4103 } 4104 4105 command->in_ctx = virt_dev->in_ctx; 4106 4107 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 4108 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 4109 if (!ctrl_ctx) { 4110 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4111 __func__); 4112 ret = -EINVAL; 4113 goto out; 4114 } 4115 /* 4116 * If this is the first Set Address since device plug-in or 4117 * virt_device realloaction after a resume with an xHCI power loss, 4118 * then set up the slot context. 4119 */ 4120 if (!slot_ctx->dev_info) 4121 xhci_setup_addressable_virt_dev(xhci, udev); 4122 /* Otherwise, update the control endpoint ring enqueue pointer. 
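 * In both cases only the slot context and the default control endpoint
 * are flagged for the command (add_flags = SLOT_FLAG | EP0_FLAG below);
 * no other endpoint context is touched by Address Device.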
*/ 4123 else 4124 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 4125 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 4126 ctrl_ctx->drop_flags = 0; 4127 4128 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 4129 le32_to_cpu(slot_ctx->dev_info) >> 27); 4130 4131 trace_xhci_address_ctrl_ctx(ctrl_ctx); 4132 spin_lock_irqsave(&xhci->lock, flags); 4133 trace_xhci_setup_device(virt_dev); 4134 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, 4135 udev->slot_id, setup); 4136 if (ret) { 4137 spin_unlock_irqrestore(&xhci->lock, flags); 4138 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4139 "FIXME: allocate a command ring segment"); 4140 goto out; 4141 } 4142 xhci_ring_cmd_db(xhci); 4143 spin_unlock_irqrestore(&xhci->lock, flags); 4144 4145 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 4146 wait_for_completion(command->completion); 4147 4148 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 4149 * the SetAddress() "recovery interval" required by USB and aborting the 4150 * command on a timeout. 4151 */ 4152 switch (command->status) { 4153 case COMP_COMMAND_ABORTED: 4154 case COMP_COMMAND_RING_STOPPED: 4155 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 4156 ret = -ETIME; 4157 break; 4158 case COMP_CONTEXT_STATE_ERROR: 4159 case COMP_SLOT_NOT_ENABLED_ERROR: 4160 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", 4161 act, udev->slot_id); 4162 ret = -EINVAL; 4163 break; 4164 case COMP_USB_TRANSACTION_ERROR: 4165 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); 4166 4167 mutex_unlock(&xhci->mutex); 4168 ret = xhci_disable_slot(xhci, udev->slot_id); 4169 if (!ret) 4170 xhci_alloc_dev(hcd, udev); 4171 kfree(command->completion); 4172 kfree(command); 4173 return -EPROTO; 4174 case COMP_INCOMPATIBLE_DEVICE_ERROR: 4175 dev_warn(&udev->dev, 4176 "ERROR: Incompatible device for setup %s command\n", act); 4177 ret = -ENODEV; 4178 break; 4179 case COMP_SUCCESS: 4180 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4181 "Successful setup %s command", act); 4182 break; 4183 default: 4184 xhci_err(xhci, 4185 "ERROR: unexpected setup %s command completion code 0x%x.\n", 4186 act, command->status); 4187 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); 4188 ret = -EINVAL; 4189 break; 4190 } 4191 if (ret) 4192 goto out; 4193 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 4194 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4195 "Op regs DCBAA ptr = %#016llx", temp_64); 4196 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4197 "Slot ID %d dcbaa entry @%p = %#016llx", 4198 udev->slot_id, 4199 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 4200 (unsigned long long) 4201 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 4202 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4203 "Output Context DMA address = %#08llx", 4204 (unsigned long long)virt_dev->out_ctx->dma); 4205 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 4206 le32_to_cpu(slot_ctx->dev_info) >> 27); 4207 /* 4208 * USB core uses address 1 for the roothubs, so we add one to the 4209 * address given back to us by the HC. 
4210 */ 4211 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 4212 le32_to_cpu(slot_ctx->dev_info) >> 27); 4213 /* Zero the input context control for later use */ 4214 ctrl_ctx->add_flags = 0; 4215 ctrl_ctx->drop_flags = 0; 4216 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 4217 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); 4218 4219 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4220 "Internal device address = %d", 4221 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); 4222 out: 4223 mutex_unlock(&xhci->mutex); 4224 if (command) { 4225 kfree(command->completion); 4226 kfree(command); 4227 } 4228 return ret; 4229 } 4230 4231 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 4232 { 4233 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); 4234 } 4235 4236 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) 4237 { 4238 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); 4239 } 4240 4241 /* 4242 * Transfer the port index into real index in the HW port status 4243 * registers. Caculate offset between the port's PORTSC register 4244 * and port status base. Divide the number of per port register 4245 * to get the real index. The raw port number bases 1. 4246 */ 4247 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) 4248 { 4249 struct xhci_hub *rhub; 4250 4251 rhub = xhci_get_rhub(hcd); 4252 return rhub->ports[port1 - 1]->hw_portnum + 1; 4253 } 4254 4255 /* 4256 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 4257 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 4258 */ 4259 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, 4260 struct usb_device *udev, u16 max_exit_latency) 4261 { 4262 struct xhci_virt_device *virt_dev; 4263 struct xhci_command *command; 4264 struct xhci_input_control_ctx *ctrl_ctx; 4265 struct xhci_slot_ctx *slot_ctx; 4266 unsigned long flags; 4267 int ret; 4268 4269 spin_lock_irqsave(&xhci->lock, flags); 4270 4271 virt_dev = xhci->devs[udev->slot_id]; 4272 4273 /* 4274 * virt_dev might not exists yet if xHC resumed from hibernate (S4) and 4275 * xHC was re-initialized. Exit latency will be set later after 4276 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated 4277 */ 4278 4279 if (!virt_dev || max_exit_latency == virt_dev->current_mel) { 4280 spin_unlock_irqrestore(&xhci->lock, flags); 4281 return 0; 4282 } 4283 4284 /* Attempt to issue an Evaluate Context command to change the MEL. */ 4285 command = xhci->lpm_command; 4286 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 4287 if (!ctrl_ctx) { 4288 spin_unlock_irqrestore(&xhci->lock, flags); 4289 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4290 __func__); 4291 return -ENOMEM; 4292 } 4293 4294 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); 4295 spin_unlock_irqrestore(&xhci->lock, flags); 4296 4297 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4298 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 4299 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); 4300 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); 4301 slot_ctx->dev_state = 0; 4302 4303 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 4304 "Set up evaluate context for LPM MEL change."); 4305 4306 /* Issue and wait for the evaluate context command. 
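 * (The first 'true' below asks xhci_configure_endpoint() to issue an
 * Evaluate Context command rather than Configure Endpoint, mirroring the
 * hub-setup path in xhci_update_hub_device(); the second marks the
 * command as one that must succeed.)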
*/ 4307 ret = xhci_configure_endpoint(xhci, udev, command, 4308 true, true); 4309 4310 if (!ret) { 4311 spin_lock_irqsave(&xhci->lock, flags); 4312 virt_dev->current_mel = max_exit_latency; 4313 spin_unlock_irqrestore(&xhci->lock, flags); 4314 } 4315 return ret; 4316 } 4317 4318 #ifdef CONFIG_PM 4319 4320 /* BESL to HIRD Encoding array for USB2 LPM */ 4321 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4322 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 4323 4324 /* Calculate HIRD/BESL for USB2 PORTPMSC*/ 4325 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 4326 struct usb_device *udev) 4327 { 4328 int u2del, besl, besl_host; 4329 int besl_device = 0; 4330 u32 field; 4331 4332 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 4333 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4334 4335 if (field & USB_BESL_SUPPORT) { 4336 for (besl_host = 0; besl_host < 16; besl_host++) { 4337 if (xhci_besl_encoding[besl_host] >= u2del) 4338 break; 4339 } 4340 /* Use baseline BESL value as default */ 4341 if (field & USB_BESL_BASELINE_VALID) 4342 besl_device = USB_GET_BESL_BASELINE(field); 4343 else if (field & USB_BESL_DEEP_VALID) 4344 besl_device = USB_GET_BESL_DEEP(field); 4345 } else { 4346 if (u2del <= 50) 4347 besl_host = 0; 4348 else 4349 besl_host = (u2del - 51) / 75 + 1; 4350 } 4351 4352 besl = besl_host + besl_device; 4353 if (besl > 15) 4354 besl = 15; 4355 4356 return besl; 4357 } 4358 4359 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ 4360 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) 4361 { 4362 u32 field; 4363 int l1; 4364 int besld = 0; 4365 int hirdm = 0; 4366 4367 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4368 4369 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ 4370 l1 = udev->l1_params.timeout / 256; 4371 4372 /* device has preferred BESLD */ 4373 if (field & USB_BESL_DEEP_VALID) { 4374 besld = USB_GET_BESL_DEEP(field); 4375 hirdm = 1; 4376 } 4377 4378 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); 4379 } 4380 4381 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4382 struct usb_device *udev, int enable) 4383 { 4384 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4385 struct xhci_port **ports; 4386 __le32 __iomem *pm_addr, *hlpm_addr; 4387 u32 pm_val, hlpm_val, field; 4388 unsigned int port_num; 4389 unsigned long flags; 4390 int hird, exit_latency; 4391 int ret; 4392 4393 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || 4394 !udev->lpm_capable) 4395 return -EPERM; 4396 4397 if (!udev->parent || udev->parent->parent || 4398 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4399 return -EPERM; 4400 4401 if (udev->usb2_hw_lpm_capable != 1) 4402 return -EPERM; 4403 4404 spin_lock_irqsave(&xhci->lock, flags); 4405 4406 ports = xhci->usb2_rhub.ports; 4407 port_num = udev->portnum - 1; 4408 pm_addr = ports[port_num]->addr + PORTPMSC; 4409 pm_val = readl(pm_addr); 4410 hlpm_addr = ports[port_num]->addr + PORTHLPMC; 4411 4412 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 4413 enable ? "enable" : "disable", port_num + 1); 4414 4415 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { 4416 /* Host supports BESL timeout instead of HIRD */ 4417 if (udev->usb2_hw_lpm_besl_capable) { 4418 /* if device doesn't have a preferred BESL value use a 4419 * default one which works with mixed HIRD and BESL 4420 * systems. 
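 * (udev->l1_params.besl is seeded with that default in
 * xhci_update_device().)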
See XHCI_DEFAULT_BESL definition in xhci.h 4421 */ 4422 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4423 if ((field & USB_BESL_SUPPORT) && 4424 (field & USB_BESL_BASELINE_VALID)) 4425 hird = USB_GET_BESL_BASELINE(field); 4426 else 4427 hird = udev->l1_params.besl; 4428 4429 exit_latency = xhci_besl_encoding[hird]; 4430 spin_unlock_irqrestore(&xhci->lock, flags); 4431 4432 /* USB 3.0 code dedicate one xhci->lpm_command->in_ctx 4433 * input context for link powermanagement evaluate 4434 * context commands. It is protected by hcd->bandwidth 4435 * mutex and is shared by all devices. We need to set 4436 * the max ext latency in USB 2 BESL LPM as well, so 4437 * use the same mutex and xhci_change_max_exit_latency() 4438 */ 4439 mutex_lock(hcd->bandwidth_mutex); 4440 ret = xhci_change_max_exit_latency(xhci, udev, 4441 exit_latency); 4442 mutex_unlock(hcd->bandwidth_mutex); 4443 4444 if (ret < 0) 4445 return ret; 4446 spin_lock_irqsave(&xhci->lock, flags); 4447 4448 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); 4449 writel(hlpm_val, hlpm_addr); 4450 /* flush write */ 4451 readl(hlpm_addr); 4452 } else { 4453 hird = xhci_calculate_hird_besl(xhci, udev); 4454 } 4455 4456 pm_val &= ~PORT_HIRD_MASK; 4457 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); 4458 writel(pm_val, pm_addr); 4459 pm_val = readl(pm_addr); 4460 pm_val |= PORT_HLE; 4461 writel(pm_val, pm_addr); 4462 /* flush write */ 4463 readl(pm_addr); 4464 } else { 4465 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); 4466 writel(pm_val, pm_addr); 4467 /* flush write */ 4468 readl(pm_addr); 4469 if (udev->usb2_hw_lpm_besl_capable) { 4470 spin_unlock_irqrestore(&xhci->lock, flags); 4471 mutex_lock(hcd->bandwidth_mutex); 4472 xhci_change_max_exit_latency(xhci, udev, 0); 4473 mutex_unlock(hcd->bandwidth_mutex); 4474 return 0; 4475 } 4476 } 4477 4478 spin_unlock_irqrestore(&xhci->lock, flags); 4479 return 0; 4480 } 4481 4482 /* check if a usb2 port supports a given extened capability protocol 4483 * only USB2 ports extended protocol capability values are cached. 
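 * Callers pass XHCI_HLC to test for hardware LPM support and XHCI_BLC to
 * test for BESL support on the given port.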
4484 * Return 1 if capability is supported 4485 */ 4486 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, 4487 unsigned capability) 4488 { 4489 u32 port_offset, port_count; 4490 int i; 4491 4492 for (i = 0; i < xhci->num_ext_caps; i++) { 4493 if (xhci->ext_caps[i] & capability) { 4494 /* port offsets starts at 1 */ 4495 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; 4496 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); 4497 if (port >= port_offset && 4498 port < port_offset + port_count) 4499 return 1; 4500 } 4501 } 4502 return 0; 4503 } 4504 4505 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4506 { 4507 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4508 int portnum = udev->portnum - 1; 4509 4510 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) 4511 return 0; 4512 4513 /* we only support lpm for non-hub device connected to root hub yet */ 4514 if (!udev->parent || udev->parent->parent || 4515 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4516 return 0; 4517 4518 if (xhci->hw_lpm_support == 1 && 4519 xhci_check_usb2_port_capability( 4520 xhci, portnum, XHCI_HLC)) { 4521 udev->usb2_hw_lpm_capable = 1; 4522 udev->l1_params.timeout = XHCI_L1_TIMEOUT; 4523 udev->l1_params.besl = XHCI_DEFAULT_BESL; 4524 if (xhci_check_usb2_port_capability(xhci, portnum, 4525 XHCI_BLC)) 4526 udev->usb2_hw_lpm_besl_capable = 1; 4527 } 4528 4529 return 0; 4530 } 4531 4532 /*---------------------- USB 3.0 Link PM functions ------------------------*/ 4533 4534 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ 4535 static unsigned long long xhci_service_interval_to_ns( 4536 struct usb_endpoint_descriptor *desc) 4537 { 4538 return (1ULL << (desc->bInterval - 1)) * 125 * 1000; 4539 } 4540 4541 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, 4542 enum usb3_link_state state) 4543 { 4544 unsigned long long sel; 4545 unsigned long long pel; 4546 unsigned int max_sel_pel; 4547 char *state_name; 4548 4549 switch (state) { 4550 case USB3_LPM_U1: 4551 /* Convert SEL and PEL stored in nanoseconds to microseconds */ 4552 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 4553 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 4554 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; 4555 state_name = "U1"; 4556 break; 4557 case USB3_LPM_U2: 4558 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); 4559 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); 4560 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; 4561 state_name = "U2"; 4562 break; 4563 default: 4564 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 4565 __func__); 4566 return USB3_LPM_DISABLED; 4567 } 4568 4569 if (sel <= max_sel_pel && pel <= max_sel_pel) 4570 return USB3_LPM_DEVICE_INITIATED; 4571 4572 if (sel > max_sel_pel) 4573 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4574 "due to long SEL %llu ms\n", 4575 state_name, sel); 4576 else 4577 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4578 "due to long PEL %llu ms\n", 4579 state_name, pel); 4580 return USB3_LPM_DISABLED; 4581 } 4582 4583 /* The U1 timeout should be the maximum of the following values: 4584 * - For control endpoints, U1 system exit latency (SEL) * 3 4585 * - For bulk endpoints, U1 SEL * 5 4586 * - For interrupt endpoints: 4587 * - Notification EPs, U1 SEL * 3 4588 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) 4589 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) 4590 */ 4591 static unsigned long long xhci_calculate_intel_u1_timeout( 4592 struct usb_device *udev, 4593 
struct usb_endpoint_descriptor *desc) 4594 { 4595 unsigned long long timeout_ns; 4596 int ep_type; 4597 int intr_type; 4598 4599 ep_type = usb_endpoint_type(desc); 4600 switch (ep_type) { 4601 case USB_ENDPOINT_XFER_CONTROL: 4602 timeout_ns = udev->u1_params.sel * 3; 4603 break; 4604 case USB_ENDPOINT_XFER_BULK: 4605 timeout_ns = udev->u1_params.sel * 5; 4606 break; 4607 case USB_ENDPOINT_XFER_INT: 4608 intr_type = usb_endpoint_interrupt_type(desc); 4609 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { 4610 timeout_ns = udev->u1_params.sel * 3; 4611 break; 4612 } 4613 /* Otherwise the calculation is the same as isoc eps */ 4614 /* fall through */ 4615 case USB_ENDPOINT_XFER_ISOC: 4616 timeout_ns = xhci_service_interval_to_ns(desc); 4617 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); 4618 if (timeout_ns < udev->u1_params.sel * 2) 4619 timeout_ns = udev->u1_params.sel * 2; 4620 break; 4621 default: 4622 return 0; 4623 } 4624 4625 return timeout_ns; 4626 } 4627 4628 /* Returns the hub-encoded U1 timeout value. */ 4629 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, 4630 struct usb_device *udev, 4631 struct usb_endpoint_descriptor *desc) 4632 { 4633 unsigned long long timeout_ns; 4634 4635 /* Prevent U1 if service interval is shorter than U1 exit latency */ 4636 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { 4637 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { 4638 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); 4639 return USB3_LPM_DISABLED; 4640 } 4641 } 4642 4643 if (xhci->quirks & XHCI_INTEL_HOST) 4644 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); 4645 else 4646 timeout_ns = udev->u1_params.sel; 4647 4648 /* The U1 timeout is encoded in 1us intervals. 4649 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. 4650 */ 4651 if (timeout_ns == USB3_LPM_DISABLED) 4652 timeout_ns = 1; 4653 else 4654 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); 4655 4656 /* If the necessary timeout value is bigger than what we can set in the 4657 * USB 3.0 hub, we have to disable hub-initiated U1. 4658 */ 4659 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) 4660 return timeout_ns; 4661 dev_dbg(&udev->dev, "Hub-initiated U1 disabled " 4662 "due to long timeout %llu ms\n", timeout_ns); 4663 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); 4664 } 4665 4666 /* The U2 timeout should be the maximum of: 4667 * - 10 ms (to avoid the bandwidth impact on the scheduler) 4668 * - largest bInterval of any active periodic endpoint (to avoid going 4669 * into lower power link states between intervals). 4670 * - the U2 Exit Latency of the device 4671 */ 4672 static unsigned long long xhci_calculate_intel_u2_timeout( 4673 struct usb_device *udev, 4674 struct usb_endpoint_descriptor *desc) 4675 { 4676 unsigned long long timeout_ns; 4677 unsigned long long u2_del_ns; 4678 4679 timeout_ns = 10 * 1000 * 1000; 4680 4681 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && 4682 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4683 timeout_ns = xhci_service_interval_to_ns(desc); 4684 4685 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; 4686 if (u2_del_ns > timeout_ns) 4687 timeout_ns = u2_del_ns; 4688 4689 return timeout_ns; 4690 } 4691 4692 /* Returns the hub-encoded U2 timeout value. 
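 * The encoding is in 256us units (roughly DIV_ROUND_UP(timeout_ns,
 * 256 * 1000)); anything above USB3_LPM_U2_MAX_TIMEOUT disables
 * hub-initiated U2 and falls back to xhci_get_timeout_no_hub_lpm().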
*/ 4693 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, 4694 struct usb_device *udev, 4695 struct usb_endpoint_descriptor *desc) 4696 { 4697 unsigned long long timeout_ns; 4698 4699 /* Prevent U2 if service interval is shorter than U2 exit latency */ 4700 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { 4701 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { 4702 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); 4703 return USB3_LPM_DISABLED; 4704 } 4705 } 4706 4707 if (xhci->quirks & XHCI_INTEL_HOST) 4708 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); 4709 else 4710 timeout_ns = udev->u2_params.sel; 4711 4712 /* The U2 timeout is encoded in 256us intervals */ 4713 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4714 /* If the necessary timeout value is bigger than what we can set in the 4715 * USB 3.0 hub, we have to disable hub-initiated U2. 4716 */ 4717 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4718 return timeout_ns; 4719 dev_dbg(&udev->dev, "Hub-initiated U2 disabled " 4720 "due to long timeout %llu ms\n", timeout_ns); 4721 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4722 } 4723 4724 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4725 struct usb_device *udev, 4726 struct usb_endpoint_descriptor *desc, 4727 enum usb3_link_state state, 4728 u16 *timeout) 4729 { 4730 if (state == USB3_LPM_U1) 4731 return xhci_calculate_u1_timeout(xhci, udev, desc); 4732 else if (state == USB3_LPM_U2) 4733 return xhci_calculate_u2_timeout(xhci, udev, desc); 4734 4735 return USB3_LPM_DISABLED; 4736 } 4737 4738 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4739 struct usb_device *udev, 4740 struct usb_endpoint_descriptor *desc, 4741 enum usb3_link_state state, 4742 u16 *timeout) 4743 { 4744 u16 alt_timeout; 4745 4746 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 4747 desc, state, timeout); 4748 4749 /* If we found we can't enable hub-initiated LPM, and 4750 * the U1 or U2 exit latency was too high to allow 4751 * device-initiated LPM as well, then we will disable LPM 4752 * for this device, so stop searching any further. 4753 */ 4754 if (alt_timeout == USB3_LPM_DISABLED) { 4755 *timeout = alt_timeout; 4756 return -E2BIG; 4757 } 4758 if (alt_timeout > *timeout) 4759 *timeout = alt_timeout; 4760 return 0; 4761 } 4762 4763 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 4764 struct usb_device *udev, 4765 struct usb_host_interface *alt, 4766 enum usb3_link_state state, 4767 u16 *timeout) 4768 { 4769 int j; 4770 4771 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 4772 if (xhci_update_timeout_for_endpoint(xhci, udev, 4773 &alt->endpoint[j].desc, state, timeout)) 4774 return -E2BIG; 4775 continue; 4776 } 4777 return 0; 4778 } 4779 4780 static int xhci_check_intel_tier_policy(struct usb_device *udev, 4781 enum usb3_link_state state) 4782 { 4783 struct usb_device *parent; 4784 unsigned int num_hubs; 4785 4786 if (state == USB3_LPM_U2) 4787 return 0; 4788 4789 /* Don't enable U1 if the device is on a 2nd tier hub or lower. 
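 * The loop below counts external hubs between the device and the root
 * hub; two or more of them means U1 is left disabled on Intel hosts.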
*/ 4790 for (parent = udev->parent, num_hubs = 0; parent->parent; 4791 parent = parent->parent) 4792 num_hubs++; 4793 4794 if (num_hubs < 2) 4795 return 0; 4796 4797 dev_dbg(&udev->dev, "Disabling U1 link state for device" 4798 " below second-tier hub.\n"); 4799 dev_dbg(&udev->dev, "Plug device into first-tier hub " 4800 "to decrease power consumption.\n"); 4801 return -E2BIG; 4802 } 4803 4804 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 4805 struct usb_device *udev, 4806 enum usb3_link_state state) 4807 { 4808 if (xhci->quirks & XHCI_INTEL_HOST) 4809 return xhci_check_intel_tier_policy(udev, state); 4810 else 4811 return 0; 4812 } 4813 4814 /* Returns the U1 or U2 timeout that should be enabled. 4815 * If the tier check or timeout setting functions return with a non-zero exit 4816 * code, that means the timeout value has been finalized and we shouldn't look 4817 * at any more endpoints. 4818 */ 4819 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 4820 struct usb_device *udev, enum usb3_link_state state) 4821 { 4822 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4823 struct usb_host_config *config; 4824 char *state_name; 4825 int i; 4826 u16 timeout = USB3_LPM_DISABLED; 4827 4828 if (state == USB3_LPM_U1) 4829 state_name = "U1"; 4830 else if (state == USB3_LPM_U2) 4831 state_name = "U2"; 4832 else { 4833 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 4834 state); 4835 return timeout; 4836 } 4837 4838 if (xhci_check_tier_policy(xhci, udev, state) < 0) 4839 return timeout; 4840 4841 /* Gather some information about the currently installed configuration 4842 * and alternate interface settings. 4843 */ 4844 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 4845 state, &timeout)) 4846 return timeout; 4847 4848 config = udev->actconfig; 4849 if (!config) 4850 return timeout; 4851 4852 for (i = 0; i < config->desc.bNumInterfaces; i++) { 4853 struct usb_driver *driver; 4854 struct usb_interface *intf = config->interface[i]; 4855 4856 if (!intf) 4857 continue; 4858 4859 /* Check if any currently bound drivers want hub-initiated LPM 4860 * disabled. 4861 */ 4862 if (intf->dev.driver) { 4863 driver = to_usb_driver(intf->dev.driver); 4864 if (driver && driver->disable_hub_initiated_lpm) { 4865 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", 4866 state_name, driver->name); 4867 timeout = xhci_get_timeout_no_hub_lpm(udev, 4868 state); 4869 if (timeout == USB3_LPM_DISABLED) 4870 return timeout; 4871 } 4872 } 4873 4874 /* Not sure how this could happen... 
*/ 4875 if (!intf->cur_altsetting) 4876 continue; 4877 4878 if (xhci_update_timeout_for_interface(xhci, udev, 4879 intf->cur_altsetting, 4880 state, &timeout)) 4881 return timeout; 4882 } 4883 return timeout; 4884 } 4885 4886 static int calculate_max_exit_latency(struct usb_device *udev, 4887 enum usb3_link_state state_changed, 4888 u16 hub_encoded_timeout) 4889 { 4890 unsigned long long u1_mel_us = 0; 4891 unsigned long long u2_mel_us = 0; 4892 unsigned long long mel_us = 0; 4893 bool disabling_u1; 4894 bool disabling_u2; 4895 bool enabling_u1; 4896 bool enabling_u2; 4897 4898 disabling_u1 = (state_changed == USB3_LPM_U1 && 4899 hub_encoded_timeout == USB3_LPM_DISABLED); 4900 disabling_u2 = (state_changed == USB3_LPM_U2 && 4901 hub_encoded_timeout == USB3_LPM_DISABLED); 4902 4903 enabling_u1 = (state_changed == USB3_LPM_U1 && 4904 hub_encoded_timeout != USB3_LPM_DISABLED); 4905 enabling_u2 = (state_changed == USB3_LPM_U2 && 4906 hub_encoded_timeout != USB3_LPM_DISABLED); 4907 4908 /* If U1 was already enabled and we're not disabling it, 4909 * or we're going to enable U1, account for the U1 max exit latency. 4910 */ 4911 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 4912 enabling_u1) 4913 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 4914 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 4915 enabling_u2) 4916 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 4917 4918 if (u1_mel_us > u2_mel_us) 4919 mel_us = u1_mel_us; 4920 else 4921 mel_us = u2_mel_us; 4922 /* xHCI host controller max exit latency field is only 16 bits wide. */ 4923 if (mel_us > MAX_EXIT) { 4924 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 4925 "is too big.\n", mel_us); 4926 return -E2BIG; 4927 } 4928 return mel_us; 4929 } 4930 4931 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ 4932 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4933 struct usb_device *udev, enum usb3_link_state state) 4934 { 4935 struct xhci_hcd *xhci; 4936 u16 hub_encoded_timeout; 4937 int mel; 4938 int ret; 4939 4940 xhci = hcd_to_xhci(hcd); 4941 /* The LPM timeout values are pretty host-controller specific, so don't 4942 * enable hub-initiated timeouts unless the vendor has provided 4943 * information about their timeout algorithm. 4944 */ 4945 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4946 !xhci->devs[udev->slot_id]) 4947 return USB3_LPM_DISABLED; 4948 4949 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 4950 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 4951 if (mel < 0) { 4952 /* Max Exit Latency is too big, disable LPM. 
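 * calculate_max_exit_latency() returns -E2BIG when the combined U1/U2
 * exit latency will not fit in the 16-bit MEL field of the slot context,
 * so both the hub timeout and the programmed MEL are zeroed here.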
*/ 4953 hub_encoded_timeout = USB3_LPM_DISABLED; 4954 mel = 0; 4955 } 4956 4957 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4958 if (ret) 4959 return ret; 4960 return hub_encoded_timeout; 4961 } 4962 4963 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4964 struct usb_device *udev, enum usb3_link_state state) 4965 { 4966 struct xhci_hcd *xhci; 4967 u16 mel; 4968 4969 xhci = hcd_to_xhci(hcd); 4970 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4971 !xhci->devs[udev->slot_id]) 4972 return 0; 4973 4974 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 4975 return xhci_change_max_exit_latency(xhci, udev, mel); 4976 } 4977 #else /* CONFIG_PM */ 4978 4979 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4980 struct usb_device *udev, int enable) 4981 { 4982 return 0; 4983 } 4984 4985 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4986 { 4987 return 0; 4988 } 4989 4990 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4991 struct usb_device *udev, enum usb3_link_state state) 4992 { 4993 return USB3_LPM_DISABLED; 4994 } 4995 4996 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4997 struct usb_device *udev, enum usb3_link_state state) 4998 { 4999 return 0; 5000 } 5001 #endif /* CONFIG_PM */ 5002 5003 /*-------------------------------------------------------------------------*/ 5004 5005 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 5006 * internal data structures for the device. 5007 */ 5008 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 5009 struct usb_tt *tt, gfp_t mem_flags) 5010 { 5011 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 5012 struct xhci_virt_device *vdev; 5013 struct xhci_command *config_cmd; 5014 struct xhci_input_control_ctx *ctrl_ctx; 5015 struct xhci_slot_ctx *slot_ctx; 5016 unsigned long flags; 5017 unsigned think_time; 5018 int ret; 5019 5020 /* Ignore root hubs */ 5021 if (!hdev->parent) 5022 return 0; 5023 5024 vdev = xhci->devs[hdev->slot_id]; 5025 if (!vdev) { 5026 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 5027 return -EINVAL; 5028 } 5029 5030 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); 5031 if (!config_cmd) 5032 return -ENOMEM; 5033 5034 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 5035 if (!ctrl_ctx) { 5036 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 5037 __func__); 5038 xhci_free_command(xhci, config_cmd); 5039 return -ENOMEM; 5040 } 5041 5042 spin_lock_irqsave(&xhci->lock, flags); 5043 if (hdev->speed == USB_SPEED_HIGH && 5044 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 5045 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 5046 xhci_free_command(xhci, config_cmd); 5047 spin_unlock_irqrestore(&xhci->lock, flags); 5048 return -ENOMEM; 5049 } 5050 5051 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 5052 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 5053 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 5054 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 5055 /* 5056 * refer to section 6.2.2: MTT should be 0 for full speed hub, 5057 * but it may be already set to 1 when setup an xHCI virtual 5058 * device, so clear it anyway. 
5059 */ 5060 if (tt->multi) 5061 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 5062 else if (hdev->speed == USB_SPEED_FULL) 5063 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); 5064 5065 if (xhci->hci_version > 0x95) { 5066 xhci_dbg(xhci, "xHCI version %x needs hub " 5067 "TT think time and number of ports\n", 5068 (unsigned int) xhci->hci_version); 5069 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 5070 /* Set TT think time - convert from ns to FS bit times. 5071 * 0 = 8 FS bit times, 1 = 16 FS bit times, 5072 * 2 = 24 FS bit times, 3 = 32 FS bit times. 5073 * 5074 * xHCI 1.0: this field shall be 0 if the device is not a 5075 * High-spped hub. 5076 */ 5077 think_time = tt->think_time; 5078 if (think_time != 0) 5079 think_time = (think_time / 666) - 1; 5080 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) 5081 slot_ctx->tt_info |= 5082 cpu_to_le32(TT_THINK_TIME(think_time)); 5083 } else { 5084 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 5085 "TT think time or number of ports\n", 5086 (unsigned int) xhci->hci_version); 5087 } 5088 slot_ctx->dev_state = 0; 5089 spin_unlock_irqrestore(&xhci->lock, flags); 5090 5091 xhci_dbg(xhci, "Set up %s for hub device.\n", 5092 (xhci->hci_version > 0x95) ? 5093 "configure endpoint" : "evaluate context"); 5094 5095 /* Issue and wait for the configure endpoint or 5096 * evaluate context command. 5097 */ 5098 if (xhci->hci_version > 0x95) 5099 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 5100 false, false); 5101 else 5102 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 5103 true, false); 5104 5105 xhci_free_command(xhci, config_cmd); 5106 return ret; 5107 } 5108 5109 static int xhci_get_frame(struct usb_hcd *hcd) 5110 { 5111 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 5112 /* EHCI mods by the periodic size. Why? */ 5113 return readl(&xhci->run_regs->microframe_index) >> 3; 5114 } 5115 5116 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) 5117 { 5118 struct xhci_hcd *xhci; 5119 /* 5120 * TODO: Check with DWC3 clients for sysdev according to 5121 * quirks 5122 */ 5123 struct device *dev = hcd->self.sysdev; 5124 unsigned int minor_rev; 5125 int retval; 5126 5127 /* Accept arbitrarily long scatter-gather lists */ 5128 hcd->self.sg_tablesize = ~0; 5129 5130 /* support to build packet from discontinuous buffers */ 5131 hcd->self.no_sg_constraint = 1; 5132 5133 /* XHCI controllers don't stop the ep queue on short packets :| */ 5134 hcd->self.no_stop_on_short = 1; 5135 5136 xhci = hcd_to_xhci(hcd); 5137 5138 if (usb_hcd_is_primary_hcd(hcd)) { 5139 xhci->main_hcd = hcd; 5140 xhci->usb2_rhub.hcd = hcd; 5141 /* Mark the first roothub as being USB 2.0. 5142 * The xHCI driver will register the USB 3.0 roothub. 5143 */ 5144 hcd->speed = HCD_USB2; 5145 hcd->self.root_hub->speed = USB_SPEED_HIGH; 5146 /* 5147 * USB 2.0 roothub under xHCI has an integrated TT, 5148 * (rate matching hub) as opposed to having an OHCI/UHCI 5149 * companion controller. 5150 */ 5151 hcd->has_tt = 1; 5152 } else { 5153 /* 5154 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts 5155 * should return 0x31 for sbrn, or that the minor revision 5156 * is a two digit BCD containig minor and sub-minor numbers. 5157 * This was later clarified in xHCI 1.2. 5158 * 5159 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and 5160 * minor revision set to 0x1 instead of 0x10. 
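 * So a min_rev of 0x1 is treated like 0x10 (USB 3.1), while 0x20 maps to
 * minor revision 2 (USB 3.2, which also advertises two rx/tx lanes).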
5161 */ 5162 if (xhci->usb3_rhub.min_rev == 0x1) 5163 minor_rev = 1; 5164 else 5165 minor_rev = xhci->usb3_rhub.min_rev / 0x10; 5166 5167 switch (minor_rev) { 5168 case 2: 5169 hcd->speed = HCD_USB32; 5170 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; 5171 hcd->self.root_hub->rx_lanes = 2; 5172 hcd->self.root_hub->tx_lanes = 2; 5173 break; 5174 case 1: 5175 hcd->speed = HCD_USB31; 5176 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; 5177 break; 5178 } 5179 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", 5180 minor_rev, 5181 minor_rev ? "Enhanced " : ""); 5182 5183 xhci->usb3_rhub.hcd = hcd; 5184 /* xHCI private pointer was set in xhci_pci_probe for the second 5185 * registered roothub. 5186 */ 5187 return 0; 5188 } 5189 5190 mutex_init(&xhci->mutex); 5191 xhci->cap_regs = hcd->regs; 5192 xhci->op_regs = hcd->regs + 5193 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); 5194 xhci->run_regs = hcd->regs + 5195 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 5196 /* Cache read-only capability registers */ 5197 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); 5198 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); 5199 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); 5200 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); 5201 xhci->hci_version = HC_VERSION(xhci->hcc_params); 5202 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); 5203 if (xhci->hci_version > 0x100) 5204 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); 5205 5206 xhci->quirks |= quirks; 5207 5208 get_quirks(dev, xhci); 5209 5210 /* In xhci controllers which follow xhci 1.0 spec gives a spurious 5211 * success event after a short transfer. This quirk will ignore such 5212 * spurious event. 5213 */ 5214 if (xhci->hci_version > 0x96) 5215 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 5216 5217 /* Make sure the HC is halted. */ 5218 retval = xhci_halt(xhci); 5219 if (retval) 5220 return retval; 5221 5222 xhci_zero_64b_regs(xhci); 5223 5224 xhci_dbg(xhci, "Resetting HCD\n"); 5225 /* Reset the internal HC memory state and registers. */ 5226 retval = xhci_reset(xhci); 5227 if (retval) 5228 return retval; 5229 xhci_dbg(xhci, "Reset complete\n"); 5230 5231 /* 5232 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) 5233 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit 5234 * address memory pointers actually. So, this driver clears the AC64 5235 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, 5236 * DMA_BIT_MASK(32)) in this xhci_gen_setup(). 5237 */ 5238 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) 5239 xhci->hcc_params &= ~BIT(0); 5240 5241 /* Set dma_mask and coherent_dma_mask to 64-bits, 5242 * if xHC supports 64-bit addressing */ 5243 if (HCC_64BIT_ADDR(xhci->hcc_params) && 5244 !dma_set_mask(dev, DMA_BIT_MASK(64))) { 5245 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 5246 dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); 5247 } else { 5248 /* 5249 * This is to avoid error in cases where a 32-bit USB 5250 * controller is used on a 64-bit capable system. 5251 */ 5252 retval = dma_set_mask(dev, DMA_BIT_MASK(32)); 5253 if (retval) 5254 return retval; 5255 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); 5256 dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 5257 } 5258 5259 xhci_dbg(xhci, "Calling HCD init\n"); 5260 /* Initialize HCD and host controller data structures. 
	 */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
				HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma =	xhci_map_urb_for_dma,
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_disable =	xhci_endpoint_disable,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
	.get_resuming_ports =	xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.clear_tt_buffer_complete =	xhci_clear_tt_buffer_complete,
};

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
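
/*
 * Bus glue (e.g. xhci-pci.c and xhci-plat.c) builds its hc_driver by
 * passing a struct xhci_driver_overrides to xhci_init_driver() above;
 * in practice the overrides supply ->reset, which the generic table
 * deliberately leaves NULL.
 */
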
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);