1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * xHCI host controller driver 4 * 5 * Copyright (C) 2008 Intel Corp. 6 * 7 * Author: Sarah Sharp 8 * Some code borrowed from the Linux EHCI driver. 9 */ 10 11 #include <linux/pci.h> 12 #include <linux/iopoll.h> 13 #include <linux/irq.h> 14 #include <linux/log2.h> 15 #include <linux/module.h> 16 #include <linux/moduleparam.h> 17 #include <linux/slab.h> 18 #include <linux/dmi.h> 19 #include <linux/dma-mapping.h> 20 21 #include "xhci.h" 22 #include "xhci-trace.h" 23 #include "xhci-debugfs.h" 24 #include "xhci-dbgcap.h" 25 26 #define DRIVER_AUTHOR "Sarah Sharp" 27 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" 28 29 #define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) 30 31 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ 32 static int link_quirk; 33 module_param(link_quirk, int, S_IRUGO | S_IWUSR); 34 MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); 35 36 static unsigned long long quirks; 37 module_param(quirks, ullong, S_IRUGO); 38 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); 39 40 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) 41 { 42 struct xhci_segment *seg = ring->first_seg; 43 44 if (!td || !td->start_seg) 45 return false; 46 do { 47 if (seg == td->start_seg) 48 return true; 49 seg = seg->next; 50 } while (seg && seg != ring->first_seg); 51 52 return false; 53 } 54 55 /* 56 * xhci_handshake - spin reading hc until handshake completes or fails 57 * @ptr: address of hc register to be read 58 * @mask: bits to look at in result of read 59 * @done: value of those bits when handshake succeeds 60 * @usec: timeout in microseconds 61 * 62 * Returns negative errno, or zero on success 63 * 64 * Success happens when the "mask" bits have the specified value (hardware 65 * handshake done). There are two failure modes: "usec" have passed (major 66 * hardware flakeout), or the register reads as all-ones (hardware removed). 67 */ 68 int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) 69 { 70 u32 result; 71 int ret; 72 73 ret = readl_poll_timeout_atomic(ptr, result, 74 (result & mask) == done || 75 result == U32_MAX, 76 1, timeout_us); 77 if (result == U32_MAX) /* card removed */ 78 return -ENODEV; 79 80 return ret; 81 } 82 83 /* 84 * Disable interrupts and begin the xHCI halting process. 85 */ 86 void xhci_quiesce(struct xhci_hcd *xhci) 87 { 88 u32 halted; 89 u32 cmd; 90 u32 mask; 91 92 mask = ~(XHCI_IRQS); 93 halted = readl(&xhci->op_regs->status) & STS_HALT; 94 if (!halted) 95 mask &= ~CMD_RUN; 96 97 cmd = readl(&xhci->op_regs->command); 98 cmd &= mask; 99 writel(cmd, &xhci->op_regs->command); 100 } 101 102 /* 103 * Force HC into halt state. 104 * 105 * Disable any IRQs and clear the run/stop bit. 106 * HC will complete any current and actively pipelined transactions, and 107 * should halt within 16 ms of the run/stop bit being cleared. 108 * Read HC Halted bit in the status register to see when the HC is finished. 
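 *
 * A usage sketch of the xhci_handshake() polling helper for this wait (these
 * mirror the calls made in xhci_halt() and xhci_start() below; shown here
 * only as an illustration):
 *
 *   // wait for HCHalted to become 1 after clearing the run/stop bit
 *   xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *                  XHCI_MAX_HALT_USEC);
 *
 *   // wait for HCHalted to become 0 after setting the run/stop bit
 *   xhci_handshake(&xhci->op_regs->status, STS_HALT, 0, XHCI_MAX_HALT_USEC);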
109 */ 110 int xhci_halt(struct xhci_hcd *xhci) 111 { 112 int ret; 113 114 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); 115 xhci_quiesce(xhci); 116 117 ret = xhci_handshake(&xhci->op_regs->status, 118 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); 119 if (ret) { 120 xhci_warn(xhci, "Host halt failed, %d\n", ret); 121 return ret; 122 } 123 124 xhci->xhc_state |= XHCI_STATE_HALTED; 125 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; 126 127 return ret; 128 } 129 130 /* 131 * Set the run bit and wait for the host to be running. 132 */ 133 int xhci_start(struct xhci_hcd *xhci) 134 { 135 u32 temp; 136 int ret; 137 138 temp = readl(&xhci->op_regs->command); 139 temp |= (CMD_RUN); 140 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", 141 temp); 142 writel(temp, &xhci->op_regs->command); 143 144 /* 145 * Wait for the HCHalted Status bit to be 0 to indicate the host is 146 * running. 147 */ 148 ret = xhci_handshake(&xhci->op_regs->status, 149 STS_HALT, 0, XHCI_MAX_HALT_USEC); 150 if (ret == -ETIMEDOUT) 151 xhci_err(xhci, "Host took too long to start, " 152 "waited %u microseconds.\n", 153 XHCI_MAX_HALT_USEC); 154 if (!ret) 155 /* clear state flags. Including dying, halted or removing */ 156 xhci->xhc_state = 0; 157 158 return ret; 159 } 160 161 /* 162 * Reset a halted HC. 163 * 164 * This resets pipelines, timers, counters, state machines, etc. 165 * Transactions will be terminated immediately, and operational registers 166 * will be set to their defaults. 167 */ 168 int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) 169 { 170 u32 command; 171 u32 state; 172 int ret; 173 174 state = readl(&xhci->op_regs->status); 175 176 if (state == ~(u32)0) { 177 xhci_warn(xhci, "Host not accessible, reset failed.\n"); 178 return -ENODEV; 179 } 180 181 if ((state & STS_HALT) == 0) { 182 xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); 183 return 0; 184 } 185 186 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); 187 command = readl(&xhci->op_regs->command); 188 command |= CMD_RESET; 189 writel(command, &xhci->op_regs->command); 190 191 /* Existing Intel xHCI controllers require a delay of 1 mS, 192 * after setting the CMD_RESET bit, and before accessing any 193 * HC registers. This allows the HC to complete the 194 * reset operation and be ready for HC register access. 195 * Without this delay, the subsequent HC register access, 196 * may result in a system hang very rarely. 197 */ 198 if (xhci->quirks & XHCI_INTEL_HOST) 199 udelay(1000); 200 201 ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); 202 if (ret) 203 return ret; 204 205 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) 206 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); 207 208 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 209 "Wait for controller to be ready for doorbell rings"); 210 /* 211 * xHCI cannot write to any doorbells or operational registers other 212 * than status until the "Controller Not Ready" flag is cleared. 
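 *
 * Callers in this file budget this reset differently; as an illustration of
 * how the timeout_us argument is used (taken from the stop/shutdown and
 * resume paths below):
 *
 *   xhci_reset(xhci, XHCI_RESET_SHORT_USEC);   // xhci_stop()/xhci_shutdown()
 *   xhci_reset(xhci, XHCI_RESET_LONG_USEC);    // re-init during xhci_resume()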
 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear.
It will be cleared on reset */ 286 err = xhci_handshake(&xhci->op_regs->status, 287 STS_FATAL, STS_FATAL, 288 XHCI_MAX_HALT_USEC); 289 if (!err) 290 xhci_info(xhci, "Fault detected\n"); 291 } 292 293 #ifdef CONFIG_USB_PCI 294 /* 295 * Set up MSI 296 */ 297 static int xhci_setup_msi(struct xhci_hcd *xhci) 298 { 299 int ret; 300 /* 301 * TODO:Check with MSI Soc for sysdev 302 */ 303 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 304 305 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); 306 if (ret < 0) { 307 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 308 "failed to allocate MSI entry"); 309 return ret; 310 } 311 312 ret = request_irq(pdev->irq, xhci_msi_irq, 313 0, "xhci_hcd", xhci_to_hcd(xhci)); 314 if (ret) { 315 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 316 "disable MSI interrupt"); 317 pci_free_irq_vectors(pdev); 318 } 319 320 return ret; 321 } 322 323 /* 324 * Set up MSI-X 325 */ 326 static int xhci_setup_msix(struct xhci_hcd *xhci) 327 { 328 int i, ret; 329 struct usb_hcd *hcd = xhci_to_hcd(xhci); 330 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 331 332 /* 333 * calculate number of msi-x vectors supported. 334 * - HCS_MAX_INTRS: the max number of interrupts the host can handle, 335 * with max number of interrupters based on the xhci HCSPARAMS1. 336 * - num_online_cpus: maximum msi-x vectors per CPUs core. 337 * Add additional 1 vector to ensure always available interrupt. 338 */ 339 xhci->msix_count = min(num_online_cpus() + 1, 340 HCS_MAX_INTRS(xhci->hcs_params1)); 341 342 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count, 343 PCI_IRQ_MSIX); 344 if (ret < 0) { 345 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 346 "Failed to enable MSI-X"); 347 return ret; 348 } 349 350 for (i = 0; i < xhci->msix_count; i++) { 351 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0, 352 "xhci_hcd", xhci_to_hcd(xhci)); 353 if (ret) 354 goto disable_msix; 355 } 356 357 hcd->msix_enabled = 1; 358 return ret; 359 360 disable_msix: 361 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); 362 while (--i >= 0) 363 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); 364 pci_free_irq_vectors(pdev); 365 return ret; 366 } 367 368 /* Free any IRQs and disable MSI-X */ 369 static void xhci_cleanup_msix(struct xhci_hcd *xhci) 370 { 371 struct usb_hcd *hcd = xhci_to_hcd(xhci); 372 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 373 374 if (xhci->quirks & XHCI_PLAT) 375 return; 376 377 /* return if using legacy interrupt */ 378 if (hcd->irq > 0) 379 return; 380 381 if (hcd->msix_enabled) { 382 int i; 383 384 for (i = 0; i < xhci->msix_count; i++) 385 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); 386 } else { 387 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci)); 388 } 389 390 pci_free_irq_vectors(pdev); 391 hcd->msix_enabled = 0; 392 } 393 394 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) 395 { 396 struct usb_hcd *hcd = xhci_to_hcd(xhci); 397 398 if (hcd->msix_enabled) { 399 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 400 int i; 401 402 for (i = 0; i < xhci->msix_count; i++) 403 synchronize_irq(pci_irq_vector(pdev, i)); 404 } 405 } 406 407 static int xhci_try_enable_msi(struct usb_hcd *hcd) 408 { 409 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 410 struct pci_dev *pdev; 411 int ret; 412 413 /* The xhci platform device has set up IRQs through usb_add_hcd. 
 */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts. Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			  hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
			 pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind it enter compliance mode. The quirk
 * creates a timer that polls the link state of each host controller port
 * every 2 seconds and recovers a port by letting the USB core issue a Warm
 * Reset when compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections are detected anymore). Because no
 * status event is generated when entering compliance mode (per xhci spec),
 * this quirk is needed on systems that have the failing hardware installed.
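 *
 * The timer keeps rearming until every USB3 port has been seen in U0, which
 * is tracked as a bitmask in xhci->port_status_u0. A small worked example
 * (illustrative only): with a 4-port USB3 roothub the "all ports seen" value
 * is (1 << 4) - 1 == 0xf, so once ports 1-4 have each set their bit,
 * xhci_all_ports_seen_u0() below returns true and the timer is not rearmed.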
525 */ 526 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) 527 { 528 xhci->port_status_u0 = 0; 529 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery, 530 0); 531 xhci->comp_mode_recovery_timer.expires = jiffies + 532 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); 533 534 add_timer(&xhci->comp_mode_recovery_timer); 535 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 536 "Compliance mode recovery timer initialized"); 537 } 538 539 /* 540 * This function identifies the systems that have installed the SN65LVPE502CP 541 * USB3.0 re-driver and that need the Compliance Mode Quirk. 542 * Systems: 543 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 544 */ 545 static bool xhci_compliance_mode_recovery_timer_quirk_check(void) 546 { 547 const char *dmi_product_name, *dmi_sys_vendor; 548 549 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); 550 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); 551 if (!dmi_product_name || !dmi_sys_vendor) 552 return false; 553 554 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard"))) 555 return false; 556 557 if (strstr(dmi_product_name, "Z420") || 558 strstr(dmi_product_name, "Z620") || 559 strstr(dmi_product_name, "Z820") || 560 strstr(dmi_product_name, "Z1 Workstation")) 561 return true; 562 563 return false; 564 } 565 566 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) 567 { 568 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); 569 } 570 571 572 /* 573 * Initialize memory for HCD and xHC (one-time init). 574 * 575 * Program the PAGESIZE register, initialize the device context array, create 576 * device contexts (?), set up a command ring segment (or two?), create event 577 * ring (one for now). 578 */ 579 static int xhci_init(struct usb_hcd *hcd) 580 { 581 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 582 int retval; 583 584 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); 585 spin_lock_init(&xhci->lock); 586 if (xhci->hci_version == 0x95 && link_quirk) { 587 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 588 "QUIRK: Not clearing Link TRB chain bits."); 589 xhci->quirks |= XHCI_LINK_TRB_QUIRK; 590 } else { 591 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 592 "xHCI doesn't need link TRB QUIRK"); 593 } 594 retval = xhci_mem_init(xhci, GFP_KERNEL); 595 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); 596 597 /* Initializing Compliance Mode Recovery Data If Needed */ 598 if (xhci_compliance_mode_recovery_timer_quirk_check()) { 599 xhci->quirks |= XHCI_COMP_MODE_QUIRK; 600 compliance_mode_recovery_timer_init(xhci); 601 } 602 603 return retval; 604 } 605 606 /*-------------------------------------------------------------------------*/ 607 608 609 static int xhci_run_finished(struct xhci_hcd *xhci) 610 { 611 if (xhci_start(xhci)) { 612 xhci_halt(xhci); 613 return -ENODEV; 614 } 615 xhci->shared_hcd->state = HC_STATE_RUNNING; 616 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; 617 618 if (xhci->quirks & XHCI_NEC_HOST) 619 xhci_ring_cmd_db(xhci); 620 621 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 622 "Finished xhci_run for USB3 roothub"); 623 return 0; 624 } 625 626 /* 627 * Start the HC after it was halted. 628 * 629 * This function is called by the USB core when the HC driver is added. 630 * Its opposite is xhci_stop(). 631 * 632 * xhci_init() must be called once before this function can be called. 633 * Reset the HC, enable device slot contexts, program DCBAAP, and 634 * set command ring pointer and event ring pointer. 635 * 636 * Setup MSI-X vectors and enable interrupts. 
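 *
 * The interrupt moderation (IMOD) interval programmed below is written in
 * 250ns units. A worked example, assuming the common default of
 * xhci->imod_interval = 40000 (nanoseconds):
 *
 *   temp |= (40000 / 250) & ER_IRQ_INTERVAL_MASK;   // 160 * 250ns = 40us
 *
 * i.e. at most one event ring interrupt roughly every 40 microseconds.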
637 */ 638 int xhci_run(struct usb_hcd *hcd) 639 { 640 u32 temp; 641 u64 temp_64; 642 int ret; 643 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 644 645 /* Start the xHCI host controller running only after the USB 2.0 roothub 646 * is setup. 647 */ 648 649 hcd->uses_new_polling = 1; 650 if (!usb_hcd_is_primary_hcd(hcd)) 651 return xhci_run_finished(xhci); 652 653 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); 654 655 ret = xhci_try_enable_msi(hcd); 656 if (ret) 657 return ret; 658 659 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 660 temp_64 &= ~ERST_PTR_MASK; 661 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 662 "ERST deq = 64'h%0lx", (long unsigned int) temp_64); 663 664 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 665 "// Set the interrupt modulation register"); 666 temp = readl(&xhci->ir_set->irq_control); 667 temp &= ~ER_IRQ_INTERVAL_MASK; 668 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; 669 writel(temp, &xhci->ir_set->irq_control); 670 671 /* Set the HCD state before we enable the irqs */ 672 temp = readl(&xhci->op_regs->command); 673 temp |= (CMD_EIE); 674 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 675 "// Enable interrupts, cmd = 0x%x.", temp); 676 writel(temp, &xhci->op_regs->command); 677 678 temp = readl(&xhci->ir_set->irq_pending); 679 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 680 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", 681 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); 682 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); 683 684 if (xhci->quirks & XHCI_NEC_HOST) { 685 struct xhci_command *command; 686 687 command = xhci_alloc_command(xhci, false, GFP_KERNEL); 688 if (!command) 689 return -ENOMEM; 690 691 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, 692 TRB_TYPE(TRB_NEC_GET_FW)); 693 if (ret) 694 xhci_free_command(xhci, command); 695 } 696 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 697 "Finished xhci_run for USB2 roothub"); 698 699 xhci_create_dbc_dev(xhci); 700 701 xhci_debugfs_init(xhci); 702 703 return 0; 704 } 705 EXPORT_SYMBOL_GPL(xhci_run); 706 707 /* 708 * Stop xHCI driver. 709 * 710 * This function is called by the USB core when the HC driver is removed. 711 * Its opposite is xhci_run(). 712 * 713 * Disable device contexts, disable IRQs, and quiesce the HC. 714 * Reset the HC, finish any completed transactions, and cleanup memory. 
715 */ 716 static void xhci_stop(struct usb_hcd *hcd) 717 { 718 u32 temp; 719 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 720 721 mutex_lock(&xhci->mutex); 722 723 /* Only halt host and free memory after both hcds are removed */ 724 if (!usb_hcd_is_primary_hcd(hcd)) { 725 mutex_unlock(&xhci->mutex); 726 return; 727 } 728 729 xhci_remove_dbc_dev(xhci); 730 731 spin_lock_irq(&xhci->lock); 732 xhci->xhc_state |= XHCI_STATE_HALTED; 733 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; 734 xhci_halt(xhci); 735 xhci_reset(xhci, XHCI_RESET_SHORT_USEC); 736 spin_unlock_irq(&xhci->lock); 737 738 xhci_cleanup_msix(xhci); 739 740 /* Deleting Compliance Mode Recovery Timer */ 741 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 742 (!(xhci_all_ports_seen_u0(xhci)))) { 743 del_timer_sync(&xhci->comp_mode_recovery_timer); 744 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 745 "%s: compliance mode recovery timer deleted", 746 __func__); 747 } 748 749 if (xhci->quirks & XHCI_AMD_PLL_FIX) 750 usb_amd_dev_put(); 751 752 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 753 "// Disabling event ring interrupts"); 754 temp = readl(&xhci->op_regs->status); 755 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); 756 temp = readl(&xhci->ir_set->irq_pending); 757 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); 758 759 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); 760 xhci_mem_cleanup(xhci); 761 xhci_debugfs_exit(xhci); 762 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 763 "xhci_stop completed - status = %x", 764 readl(&xhci->op_regs->status)); 765 mutex_unlock(&xhci->mutex); 766 } 767 768 /* 769 * Shutdown HC (not bus-specific) 770 * 771 * This is called when the machine is rebooting or halting. We assume that the 772 * machine will be powered off, and the HC's internal state will be reset. 773 * Don't bother to free memory. 774 * 775 * This will only ever be called with the main usb_hcd (the USB3 roothub). 776 */ 777 void xhci_shutdown(struct usb_hcd *hcd) 778 { 779 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 780 781 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) 782 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); 783 784 /* Don't poll the roothubs after shutdown. 
*/ 785 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 786 __func__, hcd->self.busnum); 787 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 788 del_timer_sync(&hcd->rh_timer); 789 790 if (xhci->shared_hcd) { 791 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 792 del_timer_sync(&xhci->shared_hcd->rh_timer); 793 } 794 795 spin_lock_irq(&xhci->lock); 796 xhci_halt(xhci); 797 /* Workaround for spurious wakeups at shutdown with HSW */ 798 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) 799 xhci_reset(xhci, XHCI_RESET_SHORT_USEC); 800 spin_unlock_irq(&xhci->lock); 801 802 xhci_cleanup_msix(xhci); 803 804 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 805 "xhci_shutdown completed - status = %x", 806 readl(&xhci->op_regs->status)); 807 } 808 EXPORT_SYMBOL_GPL(xhci_shutdown); 809 810 #ifdef CONFIG_PM 811 static void xhci_save_registers(struct xhci_hcd *xhci) 812 { 813 xhci->s3.command = readl(&xhci->op_regs->command); 814 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); 815 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 816 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); 817 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); 818 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); 819 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 820 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); 821 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); 822 } 823 824 static void xhci_restore_registers(struct xhci_hcd *xhci) 825 { 826 writel(xhci->s3.command, &xhci->op_regs->command); 827 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); 828 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); 829 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); 830 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); 831 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); 832 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); 833 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); 834 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); 835 } 836 837 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) 838 { 839 u64 val_64; 840 841 /* step 2: initialize command ring buffer */ 842 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 843 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 844 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 845 xhci->cmd_ring->dequeue) & 846 (u64) ~CMD_RING_RSVD_BITS) | 847 xhci->cmd_ring->cycle_state; 848 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 849 "// Setting command ring address to 0x%llx", 850 (long unsigned long) val_64); 851 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 852 } 853 854 /* 855 * The whole command ring must be cleared to zero when we suspend the host. 856 * 857 * The host doesn't save the command ring pointer in the suspend well, so we 858 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte 859 * aligned, because of the reserved bits in the command ring dequeue pointer 860 * register. Therefore, we can't just set the dequeue pointer back in the 861 * middle of the ring (TRBs are 16-byte aligned). 
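 *
 * A sketch of why: the low bits of the command ring control register hold
 * flags (ring cycle state, command stop/abort, command ring running), so only
 * a 64-byte aligned pointer can be written there, roughly
 *
 *   crcr = (ring_dma & ~(u64)CMD_RING_RSVD_BITS) | cycle_state;
 *
 * which is the value xhci_set_cmd_ring_deq() above composes.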
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
		       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to U0).
 * Internal wake causes an immediate xHCI wake after suspend. The PORT_CSC
 * write done at enumeration clears this wake, so force one here as well for
 * unconnected ports.
 */

static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
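	 * So, in addition to STS_EINT, every PORTSC is scanned directly, e.g.:
	 *
	 *   portsc = readl(ports[port_index]->addr);
	 *   if (portsc & PORT_CHANGE_MASK ||
	 *       (portsc & PORT_PLS_MASK) == XDEV_RESUME)
	 *           return true;   // a change bit is set or the port is resuming
	 *
	 * (this mirrors the usb2 and usb3 roothub port loops that follow).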
956 */ 957 958 port_index = xhci->usb2_rhub.num_ports; 959 ports = xhci->usb2_rhub.ports; 960 while (port_index--) { 961 portsc = readl(ports[port_index]->addr); 962 if (portsc & PORT_CHANGE_MASK || 963 (portsc & PORT_PLS_MASK) == XDEV_RESUME) 964 return true; 965 } 966 port_index = xhci->usb3_rhub.num_ports; 967 ports = xhci->usb3_rhub.ports; 968 while (port_index--) { 969 portsc = readl(ports[port_index]->addr); 970 if (portsc & PORT_CHANGE_MASK || 971 (portsc & PORT_PLS_MASK) == XDEV_RESUME) 972 return true; 973 } 974 return false; 975 } 976 977 /* 978 * Stop HC (not bus-specific) 979 * 980 * This is called when the machine transition into S3/S4 mode. 981 * 982 */ 983 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) 984 { 985 int rc = 0; 986 unsigned int delay = XHCI_MAX_HALT_USEC * 2; 987 struct usb_hcd *hcd = xhci_to_hcd(xhci); 988 u32 command; 989 u32 res; 990 991 if (!hcd->state) 992 return 0; 993 994 if (hcd->state != HC_STATE_SUSPENDED || 995 xhci->shared_hcd->state != HC_STATE_SUSPENDED) 996 return -EINVAL; 997 998 /* Clear root port wake on bits if wakeup not allowed. */ 999 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); 1000 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); 1001 1002 if (!HCD_HW_ACCESSIBLE(hcd)) 1003 return 0; 1004 1005 xhci_dbc_suspend(xhci); 1006 1007 /* Don't poll the roothubs on bus suspend. */ 1008 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 1009 __func__, hcd->self.busnum); 1010 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 1011 del_timer_sync(&hcd->rh_timer); 1012 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 1013 del_timer_sync(&xhci->shared_hcd->rh_timer); 1014 1015 if (xhci->quirks & XHCI_SUSPEND_DELAY) 1016 usleep_range(1000, 1500); 1017 1018 spin_lock_irq(&xhci->lock); 1019 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 1020 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); 1021 /* step 1: stop endpoint */ 1022 /* skipped assuming that port suspend has done */ 1023 1024 /* step 2: clear Run/Stop bit */ 1025 command = readl(&xhci->op_regs->command); 1026 command &= ~CMD_RUN; 1027 writel(command, &xhci->op_regs->command); 1028 1029 /* Some chips from Fresco Logic need an extraordinary delay */ 1030 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; 1031 1032 if (xhci_handshake(&xhci->op_regs->status, 1033 STS_HALT, STS_HALT, delay)) { 1034 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); 1035 spin_unlock_irq(&xhci->lock); 1036 return -ETIMEDOUT; 1037 } 1038 xhci_clear_command_ring(xhci); 1039 1040 /* step 3: save registers */ 1041 xhci_save_registers(xhci); 1042 1043 /* step 4: set CSS flag */ 1044 command = readl(&xhci->op_regs->command); 1045 command |= CMD_CSS; 1046 writel(command, &xhci->op_regs->command); 1047 xhci->broken_suspend = 0; 1048 if (xhci_handshake(&xhci->op_regs->status, 1049 STS_SAVE, 0, 20 * 1000)) { 1050 /* 1051 * AMD SNPS xHC 3.0 occasionally does not clear the 1052 * SSS bit of USBSTS and when driver tries to poll 1053 * to see if the xHC clears BIT(8) which never happens 1054 * and driver assumes that controller is not responding 1055 * and times out. To workaround this, its good to check 1056 * if SRE and HCE bits are not set (as per xhci 1057 * Section 5.4.2) and bypass the timeout. 
1058 */ 1059 res = readl(&xhci->op_regs->status); 1060 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) && 1061 (((res & STS_SRE) == 0) && 1062 ((res & STS_HCE) == 0))) { 1063 xhci->broken_suspend = 1; 1064 } else { 1065 xhci_warn(xhci, "WARN: xHC save state timeout\n"); 1066 spin_unlock_irq(&xhci->lock); 1067 return -ETIMEDOUT; 1068 } 1069 } 1070 spin_unlock_irq(&xhci->lock); 1071 1072 /* 1073 * Deleting Compliance Mode Recovery Timer because the xHCI Host 1074 * is about to be suspended. 1075 */ 1076 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 1077 (!(xhci_all_ports_seen_u0(xhci)))) { 1078 del_timer_sync(&xhci->comp_mode_recovery_timer); 1079 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 1080 "%s: compliance mode recovery timer deleted", 1081 __func__); 1082 } 1083 1084 /* step 5: remove core well power */ 1085 /* synchronize irq when using MSI-X */ 1086 xhci_msix_sync_irqs(xhci); 1087 1088 return rc; 1089 } 1090 EXPORT_SYMBOL_GPL(xhci_suspend); 1091 1092 /* 1093 * start xHC (not bus-specific) 1094 * 1095 * This is called when the machine transition from S3/S4 mode. 1096 * 1097 */ 1098 int xhci_resume(struct xhci_hcd *xhci, bool hibernated) 1099 { 1100 u32 command, temp = 0; 1101 struct usb_hcd *hcd = xhci_to_hcd(xhci); 1102 struct usb_hcd *secondary_hcd; 1103 int retval = 0; 1104 bool comp_timer_running = false; 1105 bool pending_portevent = false; 1106 bool reinit_xhc = false; 1107 1108 if (!hcd->state) 1109 return 0; 1110 1111 /* Wait a bit if either of the roothubs need to settle from the 1112 * transition into bus suspend. 1113 */ 1114 1115 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) || 1116 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange)) 1117 msleep(100); 1118 1119 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 1120 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); 1121 1122 spin_lock_irq(&xhci->lock); 1123 1124 if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) 1125 reinit_xhc = true; 1126 1127 if (!reinit_xhc) { 1128 /* 1129 * Some controllers might lose power during suspend, so wait 1130 * for controller not ready bit to clear, just as in xHC init. 1131 */ 1132 retval = xhci_handshake(&xhci->op_regs->status, 1133 STS_CNR, 0, 10 * 1000 * 1000); 1134 if (retval) { 1135 xhci_warn(xhci, "Controller not ready at resume %d\n", 1136 retval); 1137 spin_unlock_irq(&xhci->lock); 1138 return retval; 1139 } 1140 /* step 1: restore register */ 1141 xhci_restore_registers(xhci); 1142 /* step 2: initialize command ring buffer */ 1143 xhci_set_cmd_ring_deq(xhci); 1144 /* step 3: restore state and start state*/ 1145 /* step 3: set CRS flag */ 1146 command = readl(&xhci->op_regs->command); 1147 command |= CMD_CRS; 1148 writel(command, &xhci->op_regs->command); 1149 /* 1150 * Some controllers take up to 55+ ms to complete the controller 1151 * restore so setting the timeout to 100ms. Xhci specification 1152 * doesn't mention any timeout value. 
1153 */ 1154 if (xhci_handshake(&xhci->op_regs->status, 1155 STS_RESTORE, 0, 100 * 1000)) { 1156 xhci_warn(xhci, "WARN: xHC restore state timeout\n"); 1157 spin_unlock_irq(&xhci->lock); 1158 return -ETIMEDOUT; 1159 } 1160 } 1161 1162 temp = readl(&xhci->op_regs->status); 1163 1164 /* re-initialize the HC on Restore Error, or Host Controller Error */ 1165 if (temp & (STS_SRE | STS_HCE)) { 1166 reinit_xhc = true; 1167 xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); 1168 } 1169 1170 if (reinit_xhc) { 1171 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 1172 !(xhci_all_ports_seen_u0(xhci))) { 1173 del_timer_sync(&xhci->comp_mode_recovery_timer); 1174 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 1175 "Compliance Mode Recovery Timer deleted!"); 1176 } 1177 1178 /* Let the USB core know _both_ roothubs lost power. */ 1179 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); 1180 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); 1181 1182 xhci_dbg(xhci, "Stop HCD\n"); 1183 xhci_halt(xhci); 1184 xhci_zero_64b_regs(xhci); 1185 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); 1186 spin_unlock_irq(&xhci->lock); 1187 if (retval) 1188 return retval; 1189 xhci_cleanup_msix(xhci); 1190 1191 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); 1192 temp = readl(&xhci->op_regs->status); 1193 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); 1194 temp = readl(&xhci->ir_set->irq_pending); 1195 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); 1196 1197 xhci_dbg(xhci, "cleaning up memory\n"); 1198 xhci_mem_cleanup(xhci); 1199 xhci_debugfs_exit(xhci); 1200 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", 1201 readl(&xhci->op_regs->status)); 1202 1203 /* USB core calls the PCI reinit and start functions twice: 1204 * first with the primary HCD, and then with the secondary HCD. 1205 * If we don't do the same, the host will never be started. 1206 */ 1207 if (!usb_hcd_is_primary_hcd(hcd)) 1208 secondary_hcd = hcd; 1209 else 1210 secondary_hcd = xhci->shared_hcd; 1211 1212 xhci_dbg(xhci, "Initialize the xhci_hcd\n"); 1213 retval = xhci_init(hcd->primary_hcd); 1214 if (retval) 1215 return retval; 1216 comp_timer_running = true; 1217 1218 xhci_dbg(xhci, "Start the primary HCD\n"); 1219 retval = xhci_run(hcd->primary_hcd); 1220 if (!retval) { 1221 xhci_dbg(xhci, "Start the secondary HCD\n"); 1222 retval = xhci_run(secondary_hcd); 1223 } 1224 hcd->state = HC_STATE_SUSPENDED; 1225 xhci->shared_hcd->state = HC_STATE_SUSPENDED; 1226 goto done; 1227 } 1228 1229 /* step 4: set Run/Stop bit */ 1230 command = readl(&xhci->op_regs->command); 1231 command |= CMD_RUN; 1232 writel(command, &xhci->op_regs->command); 1233 xhci_handshake(&xhci->op_regs->status, STS_HALT, 1234 0, 250 * 1000); 1235 1236 /* step 5: walk topology and initialize portsc, 1237 * portpmsc and portli 1238 */ 1239 /* this is done in bus_resume */ 1240 1241 /* step 6: restart each of the previously 1242 * Running endpoints by ringing their doorbells 1243 */ 1244 1245 spin_unlock_irq(&xhci->lock); 1246 1247 xhci_dbc_resume(xhci); 1248 1249 done: 1250 if (retval == 0) { 1251 /* 1252 * Resume roothubs only if there are pending events. 1253 * USB 3 devices resend U3 LFPS wake after a 100ms delay if 1254 * the first wake signalling failed, give it that chance. 
1255 */ 1256 pending_portevent = xhci_pending_portevent(xhci); 1257 if (!pending_portevent) { 1258 msleep(120); 1259 pending_portevent = xhci_pending_portevent(xhci); 1260 } 1261 1262 if (pending_portevent) { 1263 usb_hcd_resume_root_hub(xhci->shared_hcd); 1264 usb_hcd_resume_root_hub(hcd); 1265 } 1266 } 1267 /* 1268 * If system is subject to the Quirk, Compliance Mode Timer needs to 1269 * be re-initialized Always after a system resume. Ports are subject 1270 * to suffer the Compliance Mode issue again. It doesn't matter if 1271 * ports have entered previously to U0 before system's suspension. 1272 */ 1273 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) 1274 compliance_mode_recovery_timer_init(xhci); 1275 1276 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) 1277 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller)); 1278 1279 /* Re-enable port polling. */ 1280 xhci_dbg(xhci, "%s: starting usb%d port polling.\n", 1281 __func__, hcd->self.busnum); 1282 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 1283 usb_hcd_poll_rh_status(xhci->shared_hcd); 1284 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 1285 usb_hcd_poll_rh_status(hcd); 1286 1287 return retval; 1288 } 1289 EXPORT_SYMBOL_GPL(xhci_resume); 1290 #endif /* CONFIG_PM */ 1291 1292 /*-------------------------------------------------------------------------*/ 1293 1294 static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb) 1295 { 1296 void *temp; 1297 int ret = 0; 1298 unsigned int buf_len; 1299 enum dma_data_direction dir; 1300 1301 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1302 buf_len = urb->transfer_buffer_length; 1303 1304 temp = kzalloc_node(buf_len, GFP_ATOMIC, 1305 dev_to_node(hcd->self.sysdev)); 1306 1307 if (usb_urb_dir_out(urb)) 1308 sg_pcopy_to_buffer(urb->sg, urb->num_sgs, 1309 temp, buf_len, 0); 1310 1311 urb->transfer_buffer = temp; 1312 urb->transfer_dma = dma_map_single(hcd->self.sysdev, 1313 urb->transfer_buffer, 1314 urb->transfer_buffer_length, 1315 dir); 1316 1317 if (dma_mapping_error(hcd->self.sysdev, 1318 urb->transfer_dma)) { 1319 ret = -EAGAIN; 1320 kfree(temp); 1321 } else { 1322 urb->transfer_flags |= URB_DMA_MAP_SINGLE; 1323 } 1324 1325 return ret; 1326 } 1327 1328 static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd, 1329 struct urb *urb) 1330 { 1331 bool ret = false; 1332 unsigned int i; 1333 unsigned int len = 0; 1334 unsigned int trb_size; 1335 unsigned int max_pkt; 1336 struct scatterlist *sg; 1337 struct scatterlist *tail_sg; 1338 1339 tail_sg = urb->sg; 1340 max_pkt = usb_endpoint_maxp(&urb->ep->desc); 1341 1342 if (!urb->num_sgs) 1343 return ret; 1344 1345 if (urb->dev->speed >= USB_SPEED_SUPER) 1346 trb_size = TRB_CACHE_SIZE_SS; 1347 else 1348 trb_size = TRB_CACHE_SIZE_HS; 1349 1350 if (urb->transfer_buffer_length != 0 && 1351 !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { 1352 for_each_sg(urb->sg, sg, urb->num_sgs, i) { 1353 len = len + sg->length; 1354 if (i > trb_size - 2) { 1355 len = len - tail_sg->length; 1356 if (len < max_pkt) { 1357 ret = true; 1358 break; 1359 } 1360 1361 tail_sg = sg_next(tail_sg); 1362 } 1363 } 1364 } 1365 return ret; 1366 } 1367 1368 static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb) 1369 { 1370 unsigned int len; 1371 unsigned int buf_len; 1372 enum dma_data_direction dir; 1373 1374 dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; 1375 1376 buf_len = urb->transfer_buffer_length; 1377 1378 if (IS_ENABLED(CONFIG_HAS_DMA) && 1379 (urb->transfer_flags & URB_DMA_MAP_SINGLE)) 1380 dma_unmap_single(hcd->self.sysdev, 1381 urb->transfer_dma, 1382 urb->transfer_buffer_length, 1383 dir); 1384 1385 if (usb_urb_dir_in(urb)) { 1386 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, 1387 urb->transfer_buffer, 1388 buf_len, 1389 0); 1390 if (len != buf_len) { 1391 xhci_dbg(hcd_to_xhci(hcd), 1392 "Copy from tmp buf to urb sg list failed\n"); 1393 urb->actual_length = len; 1394 } 1395 } 1396 urb->transfer_flags &= ~URB_DMA_MAP_SINGLE; 1397 kfree(urb->transfer_buffer); 1398 urb->transfer_buffer = NULL; 1399 } 1400 1401 /* 1402 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT), 1403 * we'll copy the actual data into the TRB address register. This is limited to 1404 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize 1405 * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed. 1406 */ 1407 static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, 1408 gfp_t mem_flags) 1409 { 1410 struct xhci_hcd *xhci; 1411 1412 xhci = hcd_to_xhci(hcd); 1413 1414 if (xhci_urb_suitable_for_idt(urb)) 1415 return 0; 1416 1417 if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) { 1418 if (xhci_urb_temp_buffer_required(hcd, urb)) 1419 return xhci_map_temp_buffer(hcd, urb); 1420 } 1421 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 1422 } 1423 1424 static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) 1425 { 1426 struct xhci_hcd *xhci; 1427 bool unmap_temp_buf = false; 1428 1429 xhci = hcd_to_xhci(hcd); 1430 1431 if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE)) 1432 unmap_temp_buf = true; 1433 1434 if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf) 1435 xhci_unmap_temp_buf(hcd, urb); 1436 else 1437 usb_hcd_unmap_urb_for_dma(hcd, urb); 1438 } 1439 1440 /** 1441 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and 1442 * HCDs. Find the index for an endpoint given its descriptor. Use the return 1443 * value to right shift 1 for the bitmask. 1444 * 1445 * Index = (epnum * 2) + direction - 1, 1446 * where direction = 0 for OUT, 1 for IN. 1447 * For control endpoints, the IN index is used (OUT index is unused), so 1448 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) 1449 */ 1450 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) 1451 { 1452 unsigned int index; 1453 if (usb_endpoint_xfer_control(desc)) 1454 index = (unsigned int) (usb_endpoint_num(desc)*2); 1455 else 1456 index = (unsigned int) (usb_endpoint_num(desc)*2) + 1457 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; 1458 return index; 1459 } 1460 EXPORT_SYMBOL_GPL(xhci_get_endpoint_index); 1461 1462 /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint 1463 * address from the XHCI endpoint index. 1464 */ 1465 unsigned int xhci_get_endpoint_address(unsigned int ep_index) 1466 { 1467 unsigned int number = DIV_ROUND_UP(ep_index, 2); 1468 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN; 1469 return direction | number; 1470 } 1471 1472 /* Find the flag for this endpoint (for use in the control context). Use the 1473 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is 1474 * bit 1, etc. 
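 *
 * Worked examples of the index/flag mapping (illustrative only):
 *
 *   ep 0x81 (ep 1 IN):  index = (1 * 2) + 1 - 1 = 2, flag = 1 << 3 = 0x8
 *   ep 0x02 (ep 2 OUT): index = (2 * 2) + 0 - 1 = 3, flag = 1 << 4 = 0x10
 *   ep0 (control):      index = 0,                   flag = 1 << 1 = 0x2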
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
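 *
 * An example of the mismatch handled below (illustrative values): the slot
 * may have been initialized assuming an 8-byte ep0 while the retrieved device
 * descriptor reports bMaxPacketSize0 = 64, so
 *
 *   hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); // 8
 *   max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);               // 64
 *
 * differ, and the input context is updated before an Evaluate Context
 * command is queued.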
1541 */ 1542 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, 1543 unsigned int ep_index, struct urb *urb, gfp_t mem_flags) 1544 { 1545 struct xhci_container_ctx *out_ctx; 1546 struct xhci_input_control_ctx *ctrl_ctx; 1547 struct xhci_ep_ctx *ep_ctx; 1548 struct xhci_command *command; 1549 int max_packet_size; 1550 int hw_max_packet_size; 1551 int ret = 0; 1552 1553 out_ctx = xhci->devs[slot_id]->out_ctx; 1554 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1555 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); 1556 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); 1557 if (hw_max_packet_size != max_packet_size) { 1558 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1559 "Max Packet Size for ep 0 changed."); 1560 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1561 "Max packet size in usb_device = %d", 1562 max_packet_size); 1563 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1564 "Max packet size in xHCI HW = %d", 1565 hw_max_packet_size); 1566 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1567 "Issuing evaluate context command."); 1568 1569 /* Set up the input context flags for the command */ 1570 /* FIXME: This won't work if a non-default control endpoint 1571 * changes max packet sizes. 1572 */ 1573 1574 command = xhci_alloc_command(xhci, true, mem_flags); 1575 if (!command) 1576 return -ENOMEM; 1577 1578 command->in_ctx = xhci->devs[slot_id]->in_ctx; 1579 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 1580 if (!ctrl_ctx) { 1581 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1582 __func__); 1583 ret = -ENOMEM; 1584 goto command_cleanup; 1585 } 1586 /* Set up the modified control endpoint 0 */ 1587 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 1588 xhci->devs[slot_id]->out_ctx, ep_index); 1589 1590 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 1591 ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ 1592 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); 1593 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); 1594 1595 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); 1596 ctrl_ctx->drop_flags = 0; 1597 1598 ret = xhci_configure_endpoint(xhci, urb->dev, command, 1599 true, false); 1600 1601 /* Clean up the input context for later use by bandwidth 1602 * functions. 1603 */ 1604 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); 1605 command_cleanup: 1606 kfree(command->completion); 1607 kfree(command); 1608 } 1609 return ret; 1610 } 1611 1612 /* 1613 * non-error returns are a promise to giveback() the urb later 1614 * we drop ownership so next owner (or urb unlink) can get it 1615 */ 1616 static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) 1617 { 1618 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1619 unsigned long flags; 1620 int ret = 0; 1621 unsigned int slot_id, ep_index; 1622 unsigned int *ep_state; 1623 struct urb_priv *urb_priv; 1624 int num_tds; 1625 1626 if (!urb) 1627 return -EINVAL; 1628 ret = xhci_check_args(hcd, urb->dev, urb->ep, 1629 true, true, __func__); 1630 if (ret <= 0) 1631 return ret ? 
ret : -EINVAL; 1632 1633 slot_id = urb->dev->slot_id; 1634 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 1635 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; 1636 1637 if (!HCD_HW_ACCESSIBLE(hcd)) 1638 return -ESHUTDOWN; 1639 1640 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { 1641 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); 1642 return -ENODEV; 1643 } 1644 1645 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) 1646 num_tds = urb->number_of_packets; 1647 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && 1648 urb->transfer_buffer_length > 0 && 1649 urb->transfer_flags & URB_ZERO_PACKET && 1650 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) 1651 num_tds = 2; 1652 else 1653 num_tds = 1; 1654 1655 urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags); 1656 if (!urb_priv) 1657 return -ENOMEM; 1658 1659 urb_priv->num_tds = num_tds; 1660 urb_priv->num_tds_done = 0; 1661 urb->hcpriv = urb_priv; 1662 1663 trace_xhci_urb_enqueue(urb); 1664 1665 if (usb_endpoint_xfer_control(&urb->ep->desc)) { 1666 /* Check to see if the max packet size for the default control 1667 * endpoint changed during FS device enumeration 1668 */ 1669 if (urb->dev->speed == USB_SPEED_FULL) { 1670 ret = xhci_check_maxpacket(xhci, slot_id, 1671 ep_index, urb, mem_flags); 1672 if (ret < 0) { 1673 xhci_urb_free_priv(urb_priv); 1674 urb->hcpriv = NULL; 1675 return ret; 1676 } 1677 } 1678 } 1679 1680 spin_lock_irqsave(&xhci->lock, flags); 1681 1682 if (xhci->xhc_state & XHCI_STATE_DYING) { 1683 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", 1684 urb->ep->desc.bEndpointAddress, urb); 1685 ret = -ESHUTDOWN; 1686 goto free_priv; 1687 } 1688 if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { 1689 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", 1690 *ep_state); 1691 ret = -EINVAL; 1692 goto free_priv; 1693 } 1694 if (*ep_state & EP_SOFT_CLEAR_TOGGLE) { 1695 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); 1696 ret = -EINVAL; 1697 goto free_priv; 1698 } 1699 1700 switch (usb_endpoint_type(&urb->ep->desc)) { 1701 1702 case USB_ENDPOINT_XFER_CONTROL: 1703 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 1704 slot_id, ep_index); 1705 break; 1706 case USB_ENDPOINT_XFER_BULK: 1707 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 1708 slot_id, ep_index); 1709 break; 1710 case USB_ENDPOINT_XFER_INT: 1711 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 1712 slot_id, ep_index); 1713 break; 1714 case USB_ENDPOINT_XFER_ISOC: 1715 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, 1716 slot_id, ep_index); 1717 } 1718 1719 if (ret) { 1720 free_priv: 1721 xhci_urb_free_priv(urb_priv); 1722 urb->hcpriv = NULL; 1723 } 1724 spin_unlock_irqrestore(&xhci->lock, flags); 1725 return ret; 1726 } 1727 1728 /* 1729 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop 1730 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC 1731 * should pick up where it left off in the TD, unless a Set Transfer Ring 1732 * Dequeue Pointer is issued. 1733 * 1734 * The TRBs that make up the buffers for the canceled URB will be "removed" from 1735 * the ring. Since the ring is a contiguous structure, they can't be physically 1736 * removed. 
Instead, there are a few cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers time out on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * check ring is not re-allocated since URB was enqueued.
If it is, then 1804 * make sure none of the ring related pointers in this URB private data 1805 * are touched, such as td_list, otherwise we overwrite freed data 1806 */ 1807 if (!td_on_ring(&urb_priv->td[0], ep_ring)) { 1808 xhci_err(xhci, "Canceled URB td not found on endpoint ring"); 1809 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { 1810 td = &urb_priv->td[i]; 1811 if (!list_empty(&td->cancelled_td_list)) 1812 list_del_init(&td->cancelled_td_list); 1813 } 1814 goto err_giveback; 1815 } 1816 1817 if (xhci->xhc_state & XHCI_STATE_HALTED) { 1818 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1819 "HC halted, freeing TD manually."); 1820 for (i = urb_priv->num_tds_done; 1821 i < urb_priv->num_tds; 1822 i++) { 1823 td = &urb_priv->td[i]; 1824 if (!list_empty(&td->td_list)) 1825 list_del_init(&td->td_list); 1826 if (!list_empty(&td->cancelled_td_list)) 1827 list_del_init(&td->cancelled_td_list); 1828 } 1829 goto err_giveback; 1830 } 1831 1832 i = urb_priv->num_tds_done; 1833 if (i < urb_priv->num_tds) 1834 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1835 "Cancel URB %p, dev %s, ep 0x%x, " 1836 "starting at offset 0x%llx", 1837 urb, urb->dev->devpath, 1838 urb->ep->desc.bEndpointAddress, 1839 (unsigned long long) xhci_trb_virt_to_dma( 1840 urb_priv->td[i].start_seg, 1841 urb_priv->td[i].first_trb)); 1842 1843 for (; i < urb_priv->num_tds; i++) { 1844 td = &urb_priv->td[i]; 1845 /* TD can already be on cancelled list if ep halted on it */ 1846 if (list_empty(&td->cancelled_td_list)) { 1847 td->cancel_status = TD_DIRTY; 1848 list_add_tail(&td->cancelled_td_list, 1849 &ep->cancelled_td_list); 1850 } 1851 } 1852 1853 /* Queue a stop endpoint command, but only if this is 1854 * the first cancellation to be handled. 1855 */ 1856 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { 1857 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); 1858 if (!command) { 1859 ret = -ENOMEM; 1860 goto done; 1861 } 1862 ep->ep_state |= EP_STOP_CMD_PENDING; 1863 ep->stop_cmd_timer.expires = jiffies + 1864 XHCI_STOP_EP_CMD_TIMEOUT * HZ; 1865 add_timer(&ep->stop_cmd_timer); 1866 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, 1867 ep_index, 0); 1868 xhci_ring_cmd_db(xhci); 1869 } 1870 done: 1871 spin_unlock_irqrestore(&xhci->lock, flags); 1872 return ret; 1873 1874 err_giveback: 1875 if (urb_priv) 1876 xhci_urb_free_priv(urb_priv); 1877 usb_hcd_unlink_urb_from_ep(hcd, urb); 1878 spin_unlock_irqrestore(&xhci->lock, flags); 1879 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); 1880 return ret; 1881 } 1882 1883 /* Drop an endpoint from a new bandwidth configuration for this device. 1884 * Only one call to this function is allowed per endpoint before 1885 * check_bandwidth() or reset_bandwidth() must be called. 1886 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will 1887 * add the endpoint to the schedule with possibly new parameters denoted by a 1888 * different endpoint descriptor in usb_host_endpoint. 1889 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is 1890 * not allowed. 1891 * 1892 * The USB core will not allow URBs to be queued to an endpoint that is being 1893 * disabled, so there's no need for mutual exclusion to protect 1894 * the xhci->devs[slot_id] structure. 
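 * For example, a typical usb_set_interface() call from the USB core drops the
 * old alt setting's endpoints, adds the new ones, and then issues a single
 * check_bandwidth() call; until then only the input context flags below are
 * staged.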
1895 */ 1896 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, 1897 struct usb_host_endpoint *ep) 1898 { 1899 struct xhci_hcd *xhci; 1900 struct xhci_container_ctx *in_ctx, *out_ctx; 1901 struct xhci_input_control_ctx *ctrl_ctx; 1902 unsigned int ep_index; 1903 struct xhci_ep_ctx *ep_ctx; 1904 u32 drop_flag; 1905 u32 new_add_flags, new_drop_flags; 1906 int ret; 1907 1908 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1909 if (ret <= 0) 1910 return ret; 1911 xhci = hcd_to_xhci(hcd); 1912 if (xhci->xhc_state & XHCI_STATE_DYING) 1913 return -ENODEV; 1914 1915 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1916 drop_flag = xhci_get_endpoint_flag(&ep->desc); 1917 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { 1918 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", 1919 __func__, drop_flag); 1920 return 0; 1921 } 1922 1923 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1924 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1925 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 1926 if (!ctrl_ctx) { 1927 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1928 __func__); 1929 return 0; 1930 } 1931 1932 ep_index = xhci_get_endpoint_index(&ep->desc); 1933 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1934 /* If the HC already knows the endpoint is disabled, 1935 * or the HCD has noted it is disabled, ignore this request 1936 */ 1937 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || 1938 le32_to_cpu(ctrl_ctx->drop_flags) & 1939 xhci_get_endpoint_flag(&ep->desc)) { 1940 /* Do not warn when called after a usb_device_reset */ 1941 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) 1942 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", 1943 __func__, ep); 1944 return 0; 1945 } 1946 1947 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); 1948 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); 1949 1950 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); 1951 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); 1952 1953 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); 1954 1955 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 1956 1957 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", 1958 (unsigned int) ep->desc.bEndpointAddress, 1959 udev->slot_id, 1960 (unsigned int) new_drop_flags, 1961 (unsigned int) new_add_flags); 1962 return 0; 1963 } 1964 EXPORT_SYMBOL_GPL(xhci_drop_endpoint); 1965 1966 /* Add an endpoint to a new possible bandwidth configuration for this device. 1967 * Only one call to this function is allowed per endpoint before 1968 * check_bandwidth() or reset_bandwidth() must be called. 1969 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will 1970 * add the endpoint to the schedule with possibly new parameters denoted by a 1971 * different endpoint descriptor in usb_host_endpoint. 1972 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is 1973 * not allowed. 1974 * 1975 * The USB core will not allow URBs to be queued to an endpoint until the 1976 * configuration or alt setting is installed in the device, so there's no need 1977 * for mutual exclusion to protect the xhci->devs[slot_id] structure. 
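 * This stages the change in the input context (the endpoint's add flag plus an
 * initialized endpoint context); nothing is issued to the hardware until
 * xhci_check_bandwidth() sends the configure endpoint command.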
1978 */ 1979 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, 1980 struct usb_host_endpoint *ep) 1981 { 1982 struct xhci_hcd *xhci; 1983 struct xhci_container_ctx *in_ctx; 1984 unsigned int ep_index; 1985 struct xhci_input_control_ctx *ctrl_ctx; 1986 struct xhci_ep_ctx *ep_ctx; 1987 u32 added_ctxs; 1988 u32 new_add_flags, new_drop_flags; 1989 struct xhci_virt_device *virt_dev; 1990 int ret = 0; 1991 1992 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1993 if (ret <= 0) { 1994 /* So we won't queue a reset ep command for a root hub */ 1995 ep->hcpriv = NULL; 1996 return ret; 1997 } 1998 xhci = hcd_to_xhci(hcd); 1999 if (xhci->xhc_state & XHCI_STATE_DYING) 2000 return -ENODEV; 2001 2002 added_ctxs = xhci_get_endpoint_flag(&ep->desc); 2003 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { 2004 /* FIXME when we have to issue an evaluate endpoint command to 2005 * deal with ep0 max packet size changing once we get the 2006 * descriptors 2007 */ 2008 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", 2009 __func__, added_ctxs); 2010 return 0; 2011 } 2012 2013 virt_dev = xhci->devs[udev->slot_id]; 2014 in_ctx = virt_dev->in_ctx; 2015 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2016 if (!ctrl_ctx) { 2017 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2018 __func__); 2019 return 0; 2020 } 2021 2022 ep_index = xhci_get_endpoint_index(&ep->desc); 2023 /* If this endpoint is already in use, and the upper layers are trying 2024 * to add it again without dropping it, reject the addition. 2025 */ 2026 if (virt_dev->eps[ep_index].ring && 2027 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { 2028 xhci_warn(xhci, "Trying to add endpoint 0x%x " 2029 "without dropping it.\n", 2030 (unsigned int) ep->desc.bEndpointAddress); 2031 return -EINVAL; 2032 } 2033 2034 /* If the HCD has already noted the endpoint is enabled, 2035 * ignore this request. 2036 */ 2037 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { 2038 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", 2039 __func__, ep); 2040 return 0; 2041 } 2042 2043 /* 2044 * Configuration and alternate setting changes must be done in 2045 * process context, not interrupt context (or so the documentation 2046 * for usb_set_interface() and usb_set_configuration() claims). 2047 */ 2048 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { 2049 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", 2050 __func__, ep->desc.bEndpointAddress); 2051 return -ENOMEM; 2052 } 2053 2054 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); 2055 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); 2056 2057 /* If xhci_endpoint_disable() was called for this endpoint, but the 2058 * xHC hasn't been notified yet through the check_bandwidth() call, 2059 * this re-adds a new state for the endpoint from the new endpoint 2060 * descriptors. We must drop and re-add this endpoint, so we leave the 2061 * drop flags alone.
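 * (An endpoint with both its drop and add flags set is treated as "changed";
 * the accounting in xhci_count_num_new_endpoints() and xhci_reserve_bandwidth()
 * handles that case separately.)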
2062 */ 2063 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); 2064 2065 /* Store the usb_device pointer for later use */ 2066 ep->hcpriv = udev; 2067 2068 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 2069 trace_xhci_add_endpoint(ep_ctx); 2070 2071 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", 2072 (unsigned int) ep->desc.bEndpointAddress, 2073 udev->slot_id, 2074 (unsigned int) new_drop_flags, 2075 (unsigned int) new_add_flags); 2076 return 0; 2077 } 2078 EXPORT_SYMBOL_GPL(xhci_add_endpoint); 2079 2080 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) 2081 { 2082 struct xhci_input_control_ctx *ctrl_ctx; 2083 struct xhci_ep_ctx *ep_ctx; 2084 struct xhci_slot_ctx *slot_ctx; 2085 int i; 2086 2087 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 2088 if (!ctrl_ctx) { 2089 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2090 __func__); 2091 return; 2092 } 2093 2094 /* When a device's add flag and drop flag are zero, any subsequent 2095 * configure endpoint command will leave that endpoint's state 2096 * untouched. Make sure we don't leave any old state in the input 2097 * endpoint contexts. 2098 */ 2099 ctrl_ctx->drop_flags = 0; 2100 ctrl_ctx->add_flags = 0; 2101 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2102 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 2103 /* Endpoint 0 is always valid */ 2104 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); 2105 for (i = 1; i < 31; i++) { 2106 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); 2107 ep_ctx->ep_info = 0; 2108 ep_ctx->ep_info2 = 0; 2109 ep_ctx->deq = 0; 2110 ep_ctx->tx_info = 0; 2111 } 2112 } 2113 2114 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, 2115 struct usb_device *udev, u32 *cmd_status) 2116 { 2117 int ret; 2118 2119 switch (*cmd_status) { 2120 case COMP_COMMAND_ABORTED: 2121 case COMP_COMMAND_RING_STOPPED: 2122 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 2123 ret = -ETIME; 2124 break; 2125 case COMP_RESOURCE_ERROR: 2126 dev_warn(&udev->dev, 2127 "Not enough host controller resources for new device state.\n"); 2128 ret = -ENOMEM; 2129 /* FIXME: can we allocate more resources for the HC? */ 2130 break; 2131 case COMP_BANDWIDTH_ERROR: 2132 case COMP_SECONDARY_BANDWIDTH_ERROR: 2133 dev_warn(&udev->dev, 2134 "Not enough bandwidth for new device state.\n"); 2135 ret = -ENOSPC; 2136 /* FIXME: can we go back to the old state? 
*/ 2137 break; 2138 case COMP_TRB_ERROR: 2139 /* the HCD set up something wrong */ 2140 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " 2141 "add flag = 1, " 2142 "and endpoint is not disabled.\n"); 2143 ret = -EINVAL; 2144 break; 2145 case COMP_INCOMPATIBLE_DEVICE_ERROR: 2146 dev_warn(&udev->dev, 2147 "ERROR: Incompatible device for endpoint configure command.\n"); 2148 ret = -ENODEV; 2149 break; 2150 case COMP_SUCCESS: 2151 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2152 "Successful Endpoint Configure command"); 2153 ret = 0; 2154 break; 2155 default: 2156 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 2157 *cmd_status); 2158 ret = -EINVAL; 2159 break; 2160 } 2161 return ret; 2162 } 2163 2164 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 2165 struct usb_device *udev, u32 *cmd_status) 2166 { 2167 int ret; 2168 2169 switch (*cmd_status) { 2170 case COMP_COMMAND_ABORTED: 2171 case COMP_COMMAND_RING_STOPPED: 2172 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 2173 ret = -ETIME; 2174 break; 2175 case COMP_PARAMETER_ERROR: 2176 dev_warn(&udev->dev, 2177 "WARN: xHCI driver setup invalid evaluate context command.\n"); 2178 ret = -EINVAL; 2179 break; 2180 case COMP_SLOT_NOT_ENABLED_ERROR: 2181 dev_warn(&udev->dev, 2182 "WARN: slot not enabled for evaluate context command.\n"); 2183 ret = -EINVAL; 2184 break; 2185 case COMP_CONTEXT_STATE_ERROR: 2186 dev_warn(&udev->dev, 2187 "WARN: invalid context state for evaluate context command.\n"); 2188 ret = -EINVAL; 2189 break; 2190 case COMP_INCOMPATIBLE_DEVICE_ERROR: 2191 dev_warn(&udev->dev, 2192 "ERROR: Incompatible device for evaluate context command.\n"); 2193 ret = -ENODEV; 2194 break; 2195 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: 2196 /* Max Exit Latency too large error */ 2197 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 2198 ret = -EINVAL; 2199 break; 2200 case COMP_SUCCESS: 2201 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2202 "Successful evaluate context command"); 2203 ret = 0; 2204 break; 2205 default: 2206 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 2207 *cmd_status); 2208 ret = -EINVAL; 2209 break; 2210 } 2211 return ret; 2212 } 2213 2214 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, 2215 struct xhci_input_control_ctx *ctrl_ctx) 2216 { 2217 u32 valid_add_flags; 2218 u32 valid_drop_flags; 2219 2220 /* Ignore the slot flag (bit 0), and the default control endpoint flag 2221 * (bit 1). The default control endpoint is added during the Address 2222 * Device command and is never removed until the slot is disabled. 2223 */ 2224 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 2225 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 2226 2227 /* Use hweight32 to count the number of ones in the add flags, or 2228 * number of endpoints added. Don't count endpoints that are changed 2229 * (both added and dropped). 
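 * For example (illustrative flag values): add_flags = 0x18 and drop_flags =
 * 0x08 mean endpoint contexts 3 and 4 are added and context 3 is also dropped
 * (i.e. changed), so this returns 2 - 1 = 1 genuinely new endpoint.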
2230 */ 2231 return hweight32(valid_add_flags) - 2232 hweight32(valid_add_flags & valid_drop_flags); 2233 } 2234 2235 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, 2236 struct xhci_input_control_ctx *ctrl_ctx) 2237 { 2238 u32 valid_add_flags; 2239 u32 valid_drop_flags; 2240 2241 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 2242 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 2243 2244 return hweight32(valid_drop_flags) - 2245 hweight32(valid_add_flags & valid_drop_flags); 2246 } 2247 2248 /* 2249 * We need to reserve the new number of endpoints before the configure endpoint 2250 * command completes. We can't subtract the dropped endpoints from the number 2251 * of active endpoints until the command completes because we can oversubscribe 2252 * the host in this case: 2253 * 2254 * - the first configure endpoint command drops more endpoints than it adds 2255 * - a second configure endpoint command that adds more endpoints is queued 2256 * - the first configure endpoint command fails, so the config is unchanged 2257 * - the second command may succeed, even though there aren't enough resources 2258 * 2259 * Must be called with xhci->lock held. 2260 */ 2261 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, 2262 struct xhci_input_control_ctx *ctrl_ctx) 2263 { 2264 u32 added_eps; 2265 2266 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 2267 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 2268 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2269 "Not enough ep ctxs: " 2270 "%u active, need to add %u, limit is %u.", 2271 xhci->num_active_eps, added_eps, 2272 xhci->limit_active_eps); 2273 return -ENOMEM; 2274 } 2275 xhci->num_active_eps += added_eps; 2276 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2277 "Adding %u ep ctxs, %u now active.", added_eps, 2278 xhci->num_active_eps); 2279 return 0; 2280 } 2281 2282 /* 2283 * The xHC failed the configure endpoint command for some other reason, so we 2284 * need to revert the resources that the failed configuration would have used. 2285 * 2286 * Must be called with xhci->lock held.
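 * For example, an input context that adds one endpoint and drops two others
 * reserves one extra context up front in xhci_reserve_host_resources(); once
 * the command succeeds, the two dropped-only contexts are released here.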
2306 */ 2307 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, 2308 struct xhci_input_control_ctx *ctrl_ctx) 2309 { 2310 u32 num_dropped_eps; 2311 2312 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); 2313 xhci->num_active_eps -= num_dropped_eps; 2314 if (num_dropped_eps) 2315 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2316 "Removing %u dropped ep ctxs, %u now active.", 2317 num_dropped_eps, 2318 xhci->num_active_eps); 2319 } 2320 2321 static unsigned int xhci_get_block_size(struct usb_device *udev) 2322 { 2323 switch (udev->speed) { 2324 case USB_SPEED_LOW: 2325 case USB_SPEED_FULL: 2326 return FS_BLOCK; 2327 case USB_SPEED_HIGH: 2328 return HS_BLOCK; 2329 case USB_SPEED_SUPER: 2330 case USB_SPEED_SUPER_PLUS: 2331 return SS_BLOCK; 2332 case USB_SPEED_UNKNOWN: 2333 case USB_SPEED_WIRELESS: 2334 default: 2335 /* Should never happen */ 2336 return 1; 2337 } 2338 } 2339 2340 static unsigned int 2341 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) 2342 { 2343 if (interval_bw->overhead[LS_OVERHEAD_TYPE]) 2344 return LS_OVERHEAD; 2345 if (interval_bw->overhead[FS_OVERHEAD_TYPE]) 2346 return FS_OVERHEAD; 2347 return HS_OVERHEAD; 2348 } 2349 2350 /* If we are changing a LS/FS device under a HS hub, 2351 * make sure (if we are activating a new TT) that the HS bus has enough 2352 * bandwidth for this new TT. 2353 */ 2354 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, 2355 struct xhci_virt_device *virt_dev, 2356 int old_active_eps) 2357 { 2358 struct xhci_interval_bw_table *bw_table; 2359 struct xhci_tt_bw_info *tt_info; 2360 2361 /* Find the bandwidth table for the root port this TT is attached to. */ 2362 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; 2363 tt_info = virt_dev->tt_info; 2364 /* If this TT already had active endpoints, the bandwidth for this TT 2365 * has already been added. Removing all periodic endpoints (and thus 2366 * making the TT inactive) will only decrease the bandwidth used. 2367 */ 2368 if (old_active_eps) 2369 return 0; 2370 if (old_active_eps == 0 && tt_info->active_eps != 0) { 2371 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) 2372 return -ENOMEM; 2373 return 0; 2374 } 2375 /* Not sure why we would have no new active endpoints... 2376 * 2377 * Maybe because of an Evaluate Context change for a hub update or a 2378 * control endpoint 0 max packet size change? 2379 * FIXME: skip the bandwidth calculation in that case. 2380 */ 2381 return 0; 2382 } 2383 2384 static int xhci_check_ss_bw(struct xhci_hcd *xhci, 2385 struct xhci_virt_device *virt_dev) 2386 { 2387 unsigned int bw_reserved; 2388 2389 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); 2390 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) 2391 return -ENOMEM; 2392 2393 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); 2394 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) 2395 return -ENOMEM; 2396 2397 return 0; 2398 } 2399 2400 /* 2401 * This algorithm is a very conservative estimate of the worst-case scheduling 2402 * scenario for any one interval. The hardware dynamically schedules the 2403 * packets, so we can't tell which microframe could be the limiting factor in 2404 * the bandwidth scheduling. This only takes into account periodic endpoints. 2405 * 2406 * Obviously, we can't solve an NP-complete problem to find the minimum worst 2407 * case scenario.
Instead, we come up with an estimate that is no less than 2408 * the worst case bandwidth used for any one microframe, but may be an 2409 * over-estimate. 2410 * 2411 * We walk the requirements for each endpoint by interval, starting with the 2412 * smallest interval, and place packets in the schedule where there is only one 2413 * possible way to schedule packets for that interval. In order to simplify 2414 * this algorithm, we record the largest max packet size for each interval, and 2415 * assume all packets will be that size. 2416 * 2417 * For interval 0, we must schedule all packets in every microframe. 2418 * The bandwidth for interval 0 is just the amount of data to be transmitted 2419 * (the sum of all max ESIT payload sizes, plus any overhead per packet times 2420 * the number of packets). 2421 * 2422 * For interval 1, we have two possible microframes to schedule those packets 2423 * in. For this algorithm, if we can schedule the same number of packets for 2424 * each possible scheduling opportunity (each microframe), we will do so. The 2425 * remaining number of packets will be saved to be transmitted in the gaps in 2426 * the next interval's scheduling sequence. 2427 * 2428 * As we move those remaining packets to be scheduled with interval 2 packets, 2429 * we have to double the number of remaining packets to transmit. This is 2430 * because the intervals are actually powers of 2, and we would be transmitting 2431 * the previous interval's packets twice in this interval. We also have to be 2432 * sure that when we look at the largest max packet size for this interval, we 2433 * also look at the largest max packet size for the remaining packets and take 2434 * the greater of the two. 2435 * 2436 * The algorithm continues to evenly distribute packets in each scheduling 2437 * opportunity, and push the remaining packets out, until we get to the last 2438 * interval. Then those packets and their associated overhead are just added 2439 * to the bandwidth used. 2440 */ 2441 static int xhci_check_bw_table(struct xhci_hcd *xhci, 2442 struct xhci_virt_device *virt_dev, 2443 int old_active_eps) 2444 { 2445 unsigned int bw_reserved; 2446 unsigned int max_bandwidth; 2447 unsigned int bw_used; 2448 unsigned int block_size; 2449 struct xhci_interval_bw_table *bw_table; 2450 unsigned int packet_size = 0; 2451 unsigned int overhead = 0; 2452 unsigned int packets_transmitted = 0; 2453 unsigned int packets_remaining = 0; 2454 unsigned int i; 2455 2456 if (virt_dev->udev->speed >= USB_SPEED_SUPER) 2457 return xhci_check_ss_bw(xhci, virt_dev); 2458 2459 if (virt_dev->udev->speed == USB_SPEED_HIGH) { 2460 max_bandwidth = HS_BW_LIMIT; 2461 /* Convert percent of bus BW reserved to blocks reserved */ 2462 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); 2463 } else { 2464 max_bandwidth = FS_BW_LIMIT; 2465 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); 2466 } 2467 2468 bw_table = virt_dev->bw_table; 2469 /* We need to translate the max packet size and max ESIT payloads into 2470 * the units the hardware uses. 2471 */ 2472 block_size = xhci_get_block_size(virt_dev->udev); 2473 2474 /* If we are manipulating a LS/FS device under a HS hub, double check 2475 * that the HS bus has enough bandwidth if we are activating a new TT.
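 * (Activating the first periodic endpoint behind a TT charges TT_HS_OVERHEAD
 * against the root port's high speed budget; see xhci_check_tt_bw_table()
 * above and xhci_update_tt_active_eps() below.)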
2476 */ 2477 if (virt_dev->tt_info) { 2478 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2479 "Recalculating BW for rootport %u", 2480 virt_dev->real_port); 2481 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2482 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2483 "newly activated TT.\n"); 2484 return -ENOMEM; 2485 } 2486 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2487 "Recalculating BW for TT slot %u port %u", 2488 virt_dev->tt_info->slot_id, 2489 virt_dev->tt_info->ttport); 2490 } else { 2491 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2492 "Recalculating BW for rootport %u", 2493 virt_dev->real_port); 2494 } 2495 2496 /* Add in how much bandwidth will be used for interval zero, or the 2497 * rounded max ESIT payload + number of packets * largest overhead. 2498 */ 2499 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 2500 bw_table->interval_bw[0].num_packets * 2501 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 2502 2503 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 2504 unsigned int bw_added; 2505 unsigned int largest_mps; 2506 unsigned int interval_overhead; 2507 2508 /* 2509 * How many packets could we transmit in this interval? 2510 * If packets didn't fit in the previous interval, we will need 2511 * to transmit that many packets twice within this interval. 2512 */ 2513 packets_remaining = 2 * packets_remaining + 2514 bw_table->interval_bw[i].num_packets; 2515 2516 /* Find the largest max packet size of this or the previous 2517 * interval. 2518 */ 2519 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2520 largest_mps = 0; 2521 else { 2522 struct xhci_virt_ep *virt_ep; 2523 struct list_head *ep_entry; 2524 2525 ep_entry = bw_table->interval_bw[i].endpoints.next; 2526 virt_ep = list_entry(ep_entry, 2527 struct xhci_virt_ep, bw_endpoint_list); 2528 /* Convert to blocks, rounding up */ 2529 largest_mps = DIV_ROUND_UP( 2530 virt_ep->bw_info.max_packet_size, 2531 block_size); 2532 } 2533 if (largest_mps > packet_size) 2534 packet_size = largest_mps; 2535 2536 /* Use the larger overhead of this or the previous interval. */ 2537 interval_overhead = xhci_get_largest_overhead( 2538 &bw_table->interval_bw[i]); 2539 if (interval_overhead > overhead) 2540 overhead = interval_overhead; 2541 2542 /* How many packets can we evenly distribute across 2543 * (1 << (i + 1)) possible scheduling opportunities? 2544 */ 2545 packets_transmitted = packets_remaining >> (i + 1); 2546 2547 /* Add in the bandwidth used for those scheduled packets */ 2548 bw_added = packets_transmitted * (overhead + packet_size); 2549 2550 /* How many packets do we have remaining to transmit? */ 2551 packets_remaining = packets_remaining % (1 << (i + 1)); 2552 2553 /* What largest max packet size should those packets have? */ 2554 /* If we've transmitted all packets, don't carry over the 2555 * largest packet size. 2556 */ 2557 if (packets_remaining == 0) { 2558 packet_size = 0; 2559 overhead = 0; 2560 } else if (packets_transmitted > 0) { 2561 /* Otherwise if we do have remaining packets, and we've 2562 * scheduled some packets in this interval, take the 2563 * largest max packet size from endpoints with this 2564 * interval. 2565 */ 2566 packet_size = largest_mps; 2567 overhead = interval_overhead; 2568 } 2569 /* Otherwise carry over packet_size and overhead from the last 2570 * time we had a remainder. 2571 */ 2572 bw_used += bw_added; 2573 if (bw_used > max_bandwidth) { 2574 xhci_warn(xhci, "Not enough bandwidth. 
" 2575 "Proposed: %u, Max: %u\n", 2576 bw_used, max_bandwidth); 2577 return -ENOMEM; 2578 } 2579 } 2580 /* 2581 * Ok, we know we have some packets left over after even-handedly 2582 * scheduling interval 15. We don't know which microframes they will 2583 * fit into, so we over-schedule and say they will be scheduled every 2584 * microframe. 2585 */ 2586 if (packets_remaining > 0) 2587 bw_used += overhead + packet_size; 2588 2589 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2590 unsigned int port_index = virt_dev->real_port - 1; 2591 2592 /* OK, we're manipulating a HS device attached to a 2593 * root port bandwidth domain. Include the number of active TTs 2594 * in the bandwidth used. 2595 */ 2596 bw_used += TT_HS_OVERHEAD * 2597 xhci->rh_bw[port_index].num_active_tts; 2598 } 2599 2600 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2601 "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2602 "Available: %u " "percent", 2603 bw_used, max_bandwidth, bw_reserved, 2604 (max_bandwidth - bw_used - bw_reserved) * 100 / 2605 max_bandwidth); 2606 2607 bw_used += bw_reserved; 2608 if (bw_used > max_bandwidth) { 2609 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2610 bw_used, max_bandwidth); 2611 return -ENOMEM; 2612 } 2613 2614 bw_table->bw_used = bw_used; 2615 return 0; 2616 } 2617 2618 static bool xhci_is_async_ep(unsigned int ep_type) 2619 { 2620 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2621 ep_type != ISOC_IN_EP && 2622 ep_type != INT_IN_EP); 2623 } 2624 2625 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2626 { 2627 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2628 } 2629 2630 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2631 { 2632 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2633 2634 if (ep_bw->ep_interval == 0) 2635 return SS_OVERHEAD_BURST + 2636 (ep_bw->mult * ep_bw->num_packets * 2637 (SS_OVERHEAD + mps)); 2638 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2639 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2640 1 << ep_bw->ep_interval); 2641 2642 } 2643 2644 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2645 struct xhci_bw_info *ep_bw, 2646 struct xhci_interval_bw_table *bw_table, 2647 struct usb_device *udev, 2648 struct xhci_virt_ep *virt_ep, 2649 struct xhci_tt_bw_info *tt_info) 2650 { 2651 struct xhci_interval_bw *interval_bw; 2652 int normalized_interval; 2653 2654 if (xhci_is_async_ep(ep_bw->type)) 2655 return; 2656 2657 if (udev->speed >= USB_SPEED_SUPER) { 2658 if (xhci_is_sync_in_ep(ep_bw->type)) 2659 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2660 xhci_get_ss_bw_consumed(ep_bw); 2661 else 2662 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2663 xhci_get_ss_bw_consumed(ep_bw); 2664 return; 2665 } 2666 2667 /* SuperSpeed endpoints never get added to intervals in the table, so 2668 * this check is only valid for HS/FS/LS devices. 2669 */ 2670 if (list_empty(&virt_ep->bw_endpoint_list)) 2671 return; 2672 /* For LS/FS devices, we need to translate the interval expressed in 2673 * microframes to frames. 
2674 */ 2675 if (udev->speed == USB_SPEED_HIGH) 2676 normalized_interval = ep_bw->ep_interval; 2677 else 2678 normalized_interval = ep_bw->ep_interval - 3; 2679 2680 if (normalized_interval == 0) 2681 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; 2682 interval_bw = &bw_table->interval_bw[normalized_interval]; 2683 interval_bw->num_packets -= ep_bw->num_packets; 2684 switch (udev->speed) { 2685 case USB_SPEED_LOW: 2686 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; 2687 break; 2688 case USB_SPEED_FULL: 2689 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; 2690 break; 2691 case USB_SPEED_HIGH: 2692 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; 2693 break; 2694 case USB_SPEED_SUPER: 2695 case USB_SPEED_SUPER_PLUS: 2696 case USB_SPEED_UNKNOWN: 2697 case USB_SPEED_WIRELESS: 2698 /* Should never happen because only LS/FS/HS endpoints will get 2699 * added to the endpoint list. 2700 */ 2701 return; 2702 } 2703 if (tt_info) 2704 tt_info->active_eps -= 1; 2705 list_del_init(&virt_ep->bw_endpoint_list); 2706 } 2707 2708 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2709 struct xhci_bw_info *ep_bw, 2710 struct xhci_interval_bw_table *bw_table, 2711 struct usb_device *udev, 2712 struct xhci_virt_ep *virt_ep, 2713 struct xhci_tt_bw_info *tt_info) 2714 { 2715 struct xhci_interval_bw *interval_bw; 2716 struct xhci_virt_ep *smaller_ep; 2717 int normalized_interval; 2718 2719 if (xhci_is_async_ep(ep_bw->type)) 2720 return; 2721 2722 if (udev->speed == USB_SPEED_SUPER) { 2723 if (xhci_is_sync_in_ep(ep_bw->type)) 2724 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2725 xhci_get_ss_bw_consumed(ep_bw); 2726 else 2727 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2728 xhci_get_ss_bw_consumed(ep_bw); 2729 return; 2730 } 2731 2732 /* For LS/FS devices, we need to translate the interval expressed in 2733 * microframes to frames. 2734 */ 2735 if (udev->speed == USB_SPEED_HIGH) 2736 normalized_interval = ep_bw->ep_interval; 2737 else 2738 normalized_interval = ep_bw->ep_interval - 3; 2739 2740 if (normalized_interval == 0) 2741 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2742 interval_bw = &bw_table->interval_bw[normalized_interval]; 2743 interval_bw->num_packets += ep_bw->num_packets; 2744 switch (udev->speed) { 2745 case USB_SPEED_LOW: 2746 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2747 break; 2748 case USB_SPEED_FULL: 2749 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2750 break; 2751 case USB_SPEED_HIGH: 2752 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2753 break; 2754 case USB_SPEED_SUPER: 2755 case USB_SPEED_SUPER_PLUS: 2756 case USB_SPEED_UNKNOWN: 2757 case USB_SPEED_WIRELESS: 2758 /* Should never happen because only LS/FS/HS endpoints will get 2759 * added to the endpoint list. 2760 */ 2761 return; 2762 } 2763 2764 if (tt_info) 2765 tt_info->active_eps += 1; 2766 /* Insert the endpoint into the list, largest max packet size first. */ 2767 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2768 bw_endpoint_list) { 2769 if (ep_bw->max_packet_size >= 2770 smaller_ep->bw_info.max_packet_size) { 2771 /* Add the new ep before the smaller endpoint */ 2772 list_add_tail(&virt_ep->bw_endpoint_list, 2773 &smaller_ep->bw_endpoint_list); 2774 return; 2775 } 2776 } 2777 /* Add the new endpoint at the end of the list. 
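 * (The list stays sorted by descending max packet size, so
 * xhci_check_bw_table() can read the largest MPS for an interval straight
 * from the head entry.)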
*/ 2778 list_add_tail(&virt_ep->bw_endpoint_list, 2779 &interval_bw->endpoints); 2780 } 2781 2782 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2783 struct xhci_virt_device *virt_dev, 2784 int old_active_eps) 2785 { 2786 struct xhci_root_port_bw_info *rh_bw_info; 2787 if (!virt_dev->tt_info) 2788 return; 2789 2790 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2791 if (old_active_eps == 0 && 2792 virt_dev->tt_info->active_eps != 0) { 2793 rh_bw_info->num_active_tts += 1; 2794 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2795 } else if (old_active_eps != 0 && 2796 virt_dev->tt_info->active_eps == 0) { 2797 rh_bw_info->num_active_tts -= 1; 2798 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2799 } 2800 } 2801 2802 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2803 struct xhci_virt_device *virt_dev, 2804 struct xhci_container_ctx *in_ctx) 2805 { 2806 struct xhci_bw_info ep_bw_info[31]; 2807 int i; 2808 struct xhci_input_control_ctx *ctrl_ctx; 2809 int old_active_eps = 0; 2810 2811 if (virt_dev->tt_info) 2812 old_active_eps = virt_dev->tt_info->active_eps; 2813 2814 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2815 if (!ctrl_ctx) { 2816 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2817 __func__); 2818 return -ENOMEM; 2819 } 2820 2821 for (i = 0; i < 31; i++) { 2822 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2823 continue; 2824 2825 /* Make a copy of the BW info in case we need to revert this */ 2826 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2827 sizeof(ep_bw_info[i])); 2828 /* Drop the endpoint from the interval table if the endpoint is 2829 * being dropped or changed. 2830 */ 2831 if (EP_IS_DROPPED(ctrl_ctx, i)) 2832 xhci_drop_ep_from_interval_table(xhci, 2833 &virt_dev->eps[i].bw_info, 2834 virt_dev->bw_table, 2835 virt_dev->udev, 2836 &virt_dev->eps[i], 2837 virt_dev->tt_info); 2838 } 2839 /* Overwrite the information stored in the endpoints' bw_info */ 2840 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2841 for (i = 0; i < 31; i++) { 2842 /* Add any changed or added endpoints to the interval table */ 2843 if (EP_IS_ADDED(ctrl_ctx, i)) 2844 xhci_add_ep_to_interval_table(xhci, 2845 &virt_dev->eps[i].bw_info, 2846 virt_dev->bw_table, 2847 virt_dev->udev, 2848 &virt_dev->eps[i], 2849 virt_dev->tt_info); 2850 } 2851 2852 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2853 /* Ok, this fits in the bandwidth we have. 2854 * Update the number of active TTs. 2855 */ 2856 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2857 return 0; 2858 } 2859 2860 /* We don't have enough bandwidth for this, revert the stored info. */ 2861 for (i = 0; i < 31; i++) { 2862 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2863 continue; 2864 2865 /* Drop the new copies of any added or changed endpoints from 2866 * the interval table. 
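 * (This undoes the speculative xhci_add_ep_to_interval_table() calls made
 * just above, before the saved bw_info copies are restored below.)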
2867 */ 2868 if (EP_IS_ADDED(ctrl_ctx, i)) { 2869 xhci_drop_ep_from_interval_table(xhci, 2870 &virt_dev->eps[i].bw_info, 2871 virt_dev->bw_table, 2872 virt_dev->udev, 2873 &virt_dev->eps[i], 2874 virt_dev->tt_info); 2875 } 2876 /* Revert the endpoint back to its old information */ 2877 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2878 sizeof(ep_bw_info[i])); 2879 /* Add any changed or dropped endpoints back into the table */ 2880 if (EP_IS_DROPPED(ctrl_ctx, i)) 2881 xhci_add_ep_to_interval_table(xhci, 2882 &virt_dev->eps[i].bw_info, 2883 virt_dev->bw_table, 2884 virt_dev->udev, 2885 &virt_dev->eps[i], 2886 virt_dev->tt_info); 2887 } 2888 return -ENOMEM; 2889 } 2890 2891 2892 /* Issue a configure endpoint command or evaluate context command 2893 * and wait for it to finish. 2894 */ 2895 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2896 struct usb_device *udev, 2897 struct xhci_command *command, 2898 bool ctx_change, bool must_succeed) 2899 { 2900 int ret; 2901 unsigned long flags; 2902 struct xhci_input_control_ctx *ctrl_ctx; 2903 struct xhci_virt_device *virt_dev; 2904 struct xhci_slot_ctx *slot_ctx; 2905 2906 if (!command) 2907 return -EINVAL; 2908 2909 spin_lock_irqsave(&xhci->lock, flags); 2910 2911 if (xhci->xhc_state & XHCI_STATE_DYING) { 2912 spin_unlock_irqrestore(&xhci->lock, flags); 2913 return -ESHUTDOWN; 2914 } 2915 2916 virt_dev = xhci->devs[udev->slot_id]; 2917 2918 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 2919 if (!ctrl_ctx) { 2920 spin_unlock_irqrestore(&xhci->lock, flags); 2921 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2922 __func__); 2923 return -ENOMEM; 2924 } 2925 2926 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2927 xhci_reserve_host_resources(xhci, ctrl_ctx)) { 2928 spin_unlock_irqrestore(&xhci->lock, flags); 2929 xhci_warn(xhci, "Not enough host resources, " 2930 "active endpoint contexts = %u\n", 2931 xhci->num_active_eps); 2932 return -ENOMEM; 2933 } 2934 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2935 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { 2936 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2937 xhci_free_host_resources(xhci, ctrl_ctx); 2938 spin_unlock_irqrestore(&xhci->lock, flags); 2939 xhci_warn(xhci, "Not enough bandwidth\n"); 2940 return -ENOMEM; 2941 } 2942 2943 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 2944 2945 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); 2946 trace_xhci_configure_endpoint(slot_ctx); 2947 2948 if (!ctx_change) 2949 ret = xhci_queue_configure_endpoint(xhci, command, 2950 command->in_ctx->dma, 2951 udev->slot_id, must_succeed); 2952 else 2953 ret = xhci_queue_evaluate_context(xhci, command, 2954 command->in_ctx->dma, 2955 udev->slot_id, must_succeed); 2956 if (ret < 0) { 2957 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2958 xhci_free_host_resources(xhci, ctrl_ctx); 2959 spin_unlock_irqrestore(&xhci->lock, flags); 2960 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2961 "FIXME allocate a new ring segment"); 2962 return -ENOMEM; 2963 } 2964 xhci_ring_cmd_db(xhci); 2965 spin_unlock_irqrestore(&xhci->lock, flags); 2966 2967 /* Wait for the configure endpoint command to complete */ 2968 wait_for_completion(command->completion); 2969 2970 if (!ctx_change) 2971 ret = xhci_configure_endpoint_result(xhci, udev, 2972 &command->status); 2973 else 2974 ret = xhci_evaluate_context_result(xhci, udev, 2975 &command->status); 2976 2977 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2978 spin_lock_irqsave(&xhci->lock, flags); 2979 /* If the command failed, remove the 
reserved resources. 2980 * Otherwise, clean up the estimate to include dropped eps. 2981 */ 2982 if (ret) 2983 xhci_free_host_resources(xhci, ctrl_ctx); 2984 else 2985 xhci_finish_resource_reservation(xhci, ctrl_ctx); 2986 spin_unlock_irqrestore(&xhci->lock, flags); 2987 } 2988 return ret; 2989 } 2990 2991 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, 2992 struct xhci_virt_device *vdev, int i) 2993 { 2994 struct xhci_virt_ep *ep = &vdev->eps[i]; 2995 2996 if (ep->ep_state & EP_HAS_STREAMS) { 2997 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", 2998 xhci_get_endpoint_address(i)); 2999 xhci_free_stream_info(xhci, ep->stream_info); 3000 ep->stream_info = NULL; 3001 ep->ep_state &= ~EP_HAS_STREAMS; 3002 } 3003 } 3004 3005 /* Called after one or more calls to xhci_add_endpoint() or 3006 * xhci_drop_endpoint(). If this call fails, the USB core is expected 3007 * to call xhci_reset_bandwidth(). 3008 * 3009 * Since we are in the middle of changing either configuration or 3010 * installing a new alt setting, the USB core won't allow URBs to be 3011 * enqueued for any endpoint on the old config or interface. Nothing 3012 * else should be touching the xhci->devs[slot_id] structure, so we 3013 * don't need to take the xhci->lock for manipulating that. 3014 */ 3015 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 3016 { 3017 int i; 3018 int ret = 0; 3019 struct xhci_hcd *xhci; 3020 struct xhci_virt_device *virt_dev; 3021 struct xhci_input_control_ctx *ctrl_ctx; 3022 struct xhci_slot_ctx *slot_ctx; 3023 struct xhci_command *command; 3024 3025 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3026 if (ret <= 0) 3027 return ret; 3028 xhci = hcd_to_xhci(hcd); 3029 if ((xhci->xhc_state & XHCI_STATE_DYING) || 3030 (xhci->xhc_state & XHCI_STATE_REMOVING)) 3031 return -ENODEV; 3032 3033 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 3034 virt_dev = xhci->devs[udev->slot_id]; 3035 3036 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 3037 if (!command) 3038 return -ENOMEM; 3039 3040 command->in_ctx = virt_dev->in_ctx; 3041 3042 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 3043 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 3044 if (!ctrl_ctx) { 3045 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3046 __func__); 3047 ret = -ENOMEM; 3048 goto command_cleanup; 3049 } 3050 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 3051 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 3052 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 3053 3054 /* Don't issue the command if there's no endpoints to update. */ 3055 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 3056 ctrl_ctx->drop_flags == 0) { 3057 ret = 0; 3058 goto command_cleanup; 3059 } 3060 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ 3061 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3062 for (i = 31; i >= 1; i--) { 3063 __le32 le32 = cpu_to_le32(BIT(i)); 3064 3065 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) 3066 || (ctrl_ctx->add_flags & le32) || i == 1) { 3067 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 3068 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); 3069 break; 3070 } 3071 } 3072 3073 ret = xhci_configure_endpoint(xhci, udev, command, 3074 false, false); 3075 if (ret) 3076 /* Callee should call reset_bandwidth() */ 3077 goto command_cleanup; 3078 3079 /* Free any rings that were dropped, but not changed. 
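 * (Bit (i + 1) of the add/drop flags corresponds to ep_index i, since flag
 * bit 0 is the slot context and bit 1 is endpoint 0.)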
*/ 3080 for (i = 1; i < 31; i++) { 3081 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 3082 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { 3083 xhci_free_endpoint_ring(xhci, virt_dev, i); 3084 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 3085 } 3086 } 3087 xhci_zero_in_ctx(xhci, virt_dev); 3088 /* 3089 * Install any rings for completely new endpoints or changed endpoints, 3090 * and free any old rings from changed endpoints. 3091 */ 3092 for (i = 1; i < 31; i++) { 3093 if (!virt_dev->eps[i].new_ring) 3094 continue; 3095 /* Only free the old ring if it exists. 3096 * It may not if this is the first add of an endpoint. 3097 */ 3098 if (virt_dev->eps[i].ring) { 3099 xhci_free_endpoint_ring(xhci, virt_dev, i); 3100 } 3101 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 3102 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 3103 virt_dev->eps[i].new_ring = NULL; 3104 xhci_debugfs_create_endpoint(xhci, virt_dev, i); 3105 } 3106 command_cleanup: 3107 kfree(command->completion); 3108 kfree(command); 3109 3110 return ret; 3111 } 3112 EXPORT_SYMBOL_GPL(xhci_check_bandwidth); 3113 3114 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 3115 { 3116 struct xhci_hcd *xhci; 3117 struct xhci_virt_device *virt_dev; 3118 int i, ret; 3119 3120 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3121 if (ret <= 0) 3122 return; 3123 xhci = hcd_to_xhci(hcd); 3124 3125 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 3126 virt_dev = xhci->devs[udev->slot_id]; 3127 /* Free any rings allocated for added endpoints */ 3128 for (i = 0; i < 31; i++) { 3129 if (virt_dev->eps[i].new_ring) { 3130 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 3131 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 3132 virt_dev->eps[i].new_ring = NULL; 3133 } 3134 } 3135 xhci_zero_in_ctx(xhci, virt_dev); 3136 } 3137 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); 3138 3139 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 3140 struct xhci_container_ctx *in_ctx, 3141 struct xhci_container_ctx *out_ctx, 3142 struct xhci_input_control_ctx *ctrl_ctx, 3143 u32 add_flags, u32 drop_flags) 3144 { 3145 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 3146 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 3147 xhci_slot_copy(xhci, in_ctx, out_ctx); 3148 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 3149 } 3150 3151 static void xhci_endpoint_disable(struct usb_hcd *hcd, 3152 struct usb_host_endpoint *host_ep) 3153 { 3154 struct xhci_hcd *xhci; 3155 struct xhci_virt_device *vdev; 3156 struct xhci_virt_ep *ep; 3157 struct usb_device *udev; 3158 unsigned long flags; 3159 unsigned int ep_index; 3160 3161 xhci = hcd_to_xhci(hcd); 3162 rescan: 3163 spin_lock_irqsave(&xhci->lock, flags); 3164 3165 udev = (struct usb_device *)host_ep->hcpriv; 3166 if (!udev || !udev->slot_id) 3167 goto done; 3168 3169 vdev = xhci->devs[udev->slot_id]; 3170 if (!vdev) 3171 goto done; 3172 3173 ep_index = xhci_get_endpoint_index(&host_ep->desc); 3174 ep = &vdev->eps[ep_index]; 3175 3176 /* wait for hub_tt_work to finish clearing hub TT */ 3177 if (ep->ep_state & EP_CLEARING_TT) { 3178 spin_unlock_irqrestore(&xhci->lock, flags); 3179 schedule_timeout_uninterruptible(1); 3180 goto rescan; 3181 } 3182 3183 if (ep->ep_state) 3184 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", 3185 ep->ep_state); 3186 done: 3187 host_ep->hcpriv = NULL; 3188 spin_unlock_irqrestore(&xhci->lock, flags); 3189 } 3190 3191 /* 3192 * Called after usb core issues a clear halt control message. 
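 * (Drivers typically get here via usb_clear_halt() or usb_reset_endpoint(),
 * which end up calling the hc_driver ->endpoint_reset() hook.)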
3193 * The host side of the halt should already be cleared by a reset endpoint 3194 * command issued when the STALL event was received. 3195 * 3196 * The reset endpoint command may only be issued to endpoints in the halted 3197 * state. For software that wishes to reset the data toggle or sequence number 3198 * of an endpoint that isn't in the halted state this function will issue a 3199 * configure endpoint command with the Drop and Add bits set for the target 3200 * endpoint. Refer to the additional note in xHCI specification section 4.6.8. 3201 */ 3202 3203 static void xhci_endpoint_reset(struct usb_hcd *hcd, 3204 struct usb_host_endpoint *host_ep) 3205 { 3206 struct xhci_hcd *xhci; 3207 struct usb_device *udev; 3208 struct xhci_virt_device *vdev; 3209 struct xhci_virt_ep *ep; 3210 struct xhci_input_control_ctx *ctrl_ctx; 3211 struct xhci_command *stop_cmd, *cfg_cmd; 3212 unsigned int ep_index; 3213 unsigned long flags; 3214 u32 ep_flag; 3215 int err; 3216 3217 xhci = hcd_to_xhci(hcd); 3218 if (!host_ep->hcpriv) 3219 return; 3220 udev = (struct usb_device *) host_ep->hcpriv; 3221 vdev = xhci->devs[udev->slot_id]; 3222 3223 /* 3224 * vdev may be lost due to xHC restore error and re-initialization 3225 * during S3/S4 resume. A new vdev will be allocated later by 3226 * xhci_discover_or_reset_device() 3227 */ 3228 if (!udev->slot_id || !vdev) 3229 return; 3230 ep_index = xhci_get_endpoint_index(&host_ep->desc); 3231 ep = &vdev->eps[ep_index]; 3232 3233 /* Bail out if toggle is already being cleared by an endpoint reset */ 3234 spin_lock_irqsave(&xhci->lock, flags); 3235 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { 3236 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; 3237 spin_unlock_irqrestore(&xhci->lock, flags); 3238 return; 3239 } 3240 spin_unlock_irqrestore(&xhci->lock, flags); 3241 /* Only interrupt and bulk ep's use data toggle, see USB 2.0 spec section 5.5.4 */ 3242 if (usb_endpoint_xfer_control(&host_ep->desc) || 3243 usb_endpoint_xfer_isoc(&host_ep->desc)) 3244 return; 3245 3246 ep_flag = xhci_get_endpoint_flag(&host_ep->desc); 3247 3248 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) 3249 return; 3250 3251 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); 3252 if (!stop_cmd) 3253 return; 3254 3255 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); 3256 if (!cfg_cmd) 3257 goto cleanup; 3258 3259 spin_lock_irqsave(&xhci->lock, flags); 3260 3261 /* block queuing new trbs and ringing ep doorbell */ 3262 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE; 3263 3264 /* 3265 * Make sure endpoint ring is empty before resetting the toggle/seq. 3266 * Driver is required to synchronously cancel all transfer requests.
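 * (e.g. with usb_kill_urb() on every URB still queued to this endpoint).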
3267 * Stop the endpoint to force xHC to update the output context 3268 */ 3269 3270 if (!list_empty(&ep->ring->td_list)) { 3271 dev_err(&udev->dev, "EP not empty, refuse reset\n"); 3272 spin_unlock_irqrestore(&xhci->lock, flags); 3273 xhci_free_command(xhci, cfg_cmd); 3274 goto cleanup; 3275 } 3276 3277 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, 3278 ep_index, 0); 3279 if (err < 0) { 3280 spin_unlock_irqrestore(&xhci->lock, flags); 3281 xhci_free_command(xhci, cfg_cmd); 3282 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", 3283 __func__, err); 3284 goto cleanup; 3285 } 3286 3287 xhci_ring_cmd_db(xhci); 3288 spin_unlock_irqrestore(&xhci->lock, flags); 3289 3290 wait_for_completion(stop_cmd->completion); 3291 3292 spin_lock_irqsave(&xhci->lock, flags); 3293 3294 /* config ep command clears toggle if add and drop ep flags are set */ 3295 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); 3296 if (!ctrl_ctx) { 3297 spin_unlock_irqrestore(&xhci->lock, flags); 3298 xhci_free_command(xhci, cfg_cmd); 3299 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3300 __func__); 3301 goto cleanup; 3302 } 3303 3304 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, 3305 ctrl_ctx, ep_flag, ep_flag); 3306 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); 3307 3308 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, 3309 udev->slot_id, false); 3310 if (err < 0) { 3311 spin_unlock_irqrestore(&xhci->lock, flags); 3312 xhci_free_command(xhci, cfg_cmd); 3313 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", 3314 __func__, err); 3315 goto cleanup; 3316 } 3317 3318 xhci_ring_cmd_db(xhci); 3319 spin_unlock_irqrestore(&xhci->lock, flags); 3320 3321 wait_for_completion(cfg_cmd->completion); 3322 3323 xhci_free_command(xhci, cfg_cmd); 3324 cleanup: 3325 xhci_free_command(xhci, stop_cmd); 3326 spin_lock_irqsave(&xhci->lock, flags); 3327 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) 3328 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; 3329 spin_unlock_irqrestore(&xhci->lock, flags); 3330 } 3331 3332 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 3333 struct usb_device *udev, struct usb_host_endpoint *ep, 3334 unsigned int slot_id) 3335 { 3336 int ret; 3337 unsigned int ep_index; 3338 unsigned int ep_state; 3339 3340 if (!ep) 3341 return -EINVAL; 3342 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 3343 if (ret <= 0) 3344 return ret ? 
ret : -EINVAL; 3345 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { 3346 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" 3347 " descriptor for ep 0x%x does not support streams\n", 3348 ep->desc.bEndpointAddress); 3349 return -EINVAL; 3350 } 3351 3352 ep_index = xhci_get_endpoint_index(&ep->desc); 3353 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3354 if (ep_state & EP_HAS_STREAMS || 3355 ep_state & EP_GETTING_STREAMS) { 3356 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " 3357 "already has streams set up.\n", 3358 ep->desc.bEndpointAddress); 3359 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " 3360 "dynamic stream context array reallocation.\n"); 3361 return -EINVAL; 3362 } 3363 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { 3364 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " 3365 "endpoint 0x%x; URBs are pending.\n", 3366 ep->desc.bEndpointAddress); 3367 return -EINVAL; 3368 } 3369 return 0; 3370 } 3371 3372 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, 3373 unsigned int *num_streams, unsigned int *num_stream_ctxs) 3374 { 3375 unsigned int max_streams; 3376 3377 /* The stream context array size must be a power of two */ 3378 *num_stream_ctxs = roundup_pow_of_two(*num_streams); 3379 /* 3380 * Find out how many primary stream array entries the host controller 3381 * supports. Later we may use secondary stream arrays (similar to 2nd 3382 * level page entries), but that's an optional feature for xHCI host 3383 * controllers. xHCs must support at least 4 stream IDs. 3384 */ 3385 max_streams = HCC_MAX_PSA(xhci->hcc_params); 3386 if (*num_stream_ctxs > max_streams) { 3387 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", 3388 max_streams); 3389 *num_stream_ctxs = max_streams; 3390 *num_streams = max_streams; 3391 } 3392 } 3393 3394 /* Returns an error code if one of the endpoints already has streams. 3395 * This does not change any data structures, it only checks and gathers 3396 * information.
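 * For example, if the caller asked for 16 streams (*num_streams arrives here
 * as 17, counting stream 0) and one endpoint's companion descriptor only
 * supports 8 streams, *num_streams is clamped to 9.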
3397 */ 3398 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, 3399 struct usb_device *udev, 3400 struct usb_host_endpoint **eps, unsigned int num_eps, 3401 unsigned int *num_streams, u32 *changed_ep_bitmask) 3402 { 3403 unsigned int max_streams; 3404 unsigned int endpoint_flag; 3405 int i; 3406 int ret; 3407 3408 for (i = 0; i < num_eps; i++) { 3409 ret = xhci_check_streams_endpoint(xhci, udev, 3410 eps[i], udev->slot_id); 3411 if (ret < 0) 3412 return ret; 3413 3414 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); 3415 if (max_streams < (*num_streams - 1)) { 3416 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", 3417 eps[i]->desc.bEndpointAddress, 3418 max_streams); 3419 *num_streams = max_streams+1; 3420 } 3421 3422 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); 3423 if (*changed_ep_bitmask & endpoint_flag) 3424 return -EINVAL; 3425 *changed_ep_bitmask |= endpoint_flag; 3426 } 3427 return 0; 3428 } 3429 3430 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, 3431 struct usb_device *udev, 3432 struct usb_host_endpoint **eps, unsigned int num_eps) 3433 { 3434 u32 changed_ep_bitmask = 0; 3435 unsigned int slot_id; 3436 unsigned int ep_index; 3437 unsigned int ep_state; 3438 int i; 3439 3440 slot_id = udev->slot_id; 3441 if (!xhci->devs[slot_id]) 3442 return 0; 3443 3444 for (i = 0; i < num_eps; i++) { 3445 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3446 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3447 /* Are streams already being freed for the endpoint? */ 3448 if (ep_state & EP_GETTING_NO_STREAMS) { 3449 xhci_warn(xhci, "WARN Can't disable streams for " 3450 "endpoint 0x%x, " 3451 "streams are being disabled already\n", 3452 eps[i]->desc.bEndpointAddress); 3453 return 0; 3454 } 3455 /* Are there actually any streams to free? */ 3456 if (!(ep_state & EP_HAS_STREAMS) && 3457 !(ep_state & EP_GETTING_STREAMS)) { 3458 xhci_warn(xhci, "WARN Can't disable streams for " 3459 "endpoint 0x%x, " 3460 "streams are already disabled!\n", 3461 eps[i]->desc.bEndpointAddress); 3462 xhci_warn(xhci, "WARN xhci_free_streams() called " 3463 "with non-streams endpoint\n"); 3464 return 0; 3465 } 3466 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); 3467 } 3468 return changed_ep_bitmask; 3469 } 3470 3471 /* 3472 * The USB device drivers use this function (through the HCD interface in USB 3473 * core) to prepare a set of bulk endpoints to use streams. Streams are used to 3474 * coordinate mass storage command queueing across multiple endpoints (basically 3475 * a stream ID == a task ID). 3476 * 3477 * Setting up streams involves allocating the same size stream context array 3478 * for each endpoint and issuing a configure endpoint command for all endpoints. 3479 * 3480 * Don't allow the call to succeed if one endpoint only supports one stream 3481 * (which means it doesn't support streams at all). 3482 * 3483 * Drivers may get less stream IDs than they asked for, if the host controller 3484 * hardware or endpoints claim they can't support the number of requested 3485 * stream IDs. 
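 * Callers normally reach this through usb_alloc_streams(); the UAS storage
 * driver, for example, requests streams for its status and data pipes and has
 * to cope with being granted fewer stream IDs than it asked for.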
3486 */ 3487 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 3488 struct usb_host_endpoint **eps, unsigned int num_eps, 3489 unsigned int num_streams, gfp_t mem_flags) 3490 { 3491 int i, ret; 3492 struct xhci_hcd *xhci; 3493 struct xhci_virt_device *vdev; 3494 struct xhci_command *config_cmd; 3495 struct xhci_input_control_ctx *ctrl_ctx; 3496 unsigned int ep_index; 3497 unsigned int num_stream_ctxs; 3498 unsigned int max_packet; 3499 unsigned long flags; 3500 u32 changed_ep_bitmask = 0; 3501 3502 if (!eps) 3503 return -EINVAL; 3504 3505 /* Add one to the number of streams requested to account for 3506 * stream 0 that is reserved for xHCI usage. 3507 */ 3508 num_streams += 1; 3509 xhci = hcd_to_xhci(hcd); 3510 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 3511 num_streams); 3512 3513 /* MaxPSASize value 0 (2 streams) means streams are not supported */ 3514 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || 3515 HCC_MAX_PSA(xhci->hcc_params) < 4) { 3516 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); 3517 return -ENOSYS; 3518 } 3519 3520 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); 3521 if (!config_cmd) 3522 return -ENOMEM; 3523 3524 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 3525 if (!ctrl_ctx) { 3526 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3527 __func__); 3528 xhci_free_command(xhci, config_cmd); 3529 return -ENOMEM; 3530 } 3531 3532 /* Check to make sure all endpoints are not already configured for 3533 * streams. While we're at it, find the maximum number of streams that 3534 * all the endpoints will support and check for duplicate endpoints. 3535 */ 3536 spin_lock_irqsave(&xhci->lock, flags); 3537 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 3538 num_eps, &num_streams, &changed_ep_bitmask); 3539 if (ret < 0) { 3540 xhci_free_command(xhci, config_cmd); 3541 spin_unlock_irqrestore(&xhci->lock, flags); 3542 return ret; 3543 } 3544 if (num_streams <= 1) { 3545 xhci_warn(xhci, "WARN: endpoints can't handle " 3546 "more than one stream.\n"); 3547 xhci_free_command(xhci, config_cmd); 3548 spin_unlock_irqrestore(&xhci->lock, flags); 3549 return -EINVAL; 3550 } 3551 vdev = xhci->devs[udev->slot_id]; 3552 /* Mark each endpoint as being in transition, so 3553 * xhci_urb_enqueue() will reject all URBs. 3554 */ 3555 for (i = 0; i < num_eps; i++) { 3556 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3557 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 3558 } 3559 spin_unlock_irqrestore(&xhci->lock, flags); 3560 3561 /* Setup internal data structures and allocate HW data structures for 3562 * streams (but don't install the HW structures in the input context 3563 * until we're sure all memory allocation succeeded). 3564 */ 3565 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 3566 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 3567 num_stream_ctxs, num_streams); 3568 3569 for (i = 0; i < num_eps; i++) { 3570 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3571 max_packet = usb_endpoint_maxp(&eps[i]->desc); 3572 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 3573 num_stream_ctxs, 3574 num_streams, 3575 max_packet, mem_flags); 3576 if (!vdev->eps[ep_index].stream_info) 3577 goto cleanup; 3578 /* Set maxPstreams in endpoint context and update deq ptr to 3579 * point to stream context array. FIXME 3580 */ 3581 } 3582 3583 /* Set up the input context for a configure endpoint command. 
*/ 3584 for (i = 0; i < num_eps; i++) { 3585 struct xhci_ep_ctx *ep_ctx; 3586 3587 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3588 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 3589 3590 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 3591 vdev->out_ctx, ep_index); 3592 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 3593 vdev->eps[ep_index].stream_info); 3594 } 3595 /* Tell the HW to drop its old copy of the endpoint context info 3596 * and add the updated copy from the input context. 3597 */ 3598 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 3599 vdev->out_ctx, ctrl_ctx, 3600 changed_ep_bitmask, changed_ep_bitmask); 3601 3602 /* Issue and wait for the configure endpoint command */ 3603 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 3604 false, false); 3605 3606 /* xHC rejected the configure endpoint command for some reason, so we 3607 * leave the old ring intact and free our internal streams data 3608 * structure. 3609 */ 3610 if (ret < 0) 3611 goto cleanup; 3612 3613 spin_lock_irqsave(&xhci->lock, flags); 3614 for (i = 0; i < num_eps; i++) { 3615 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3616 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3617 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 3618 udev->slot_id, ep_index); 3619 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 3620 } 3621 xhci_free_command(xhci, config_cmd); 3622 spin_unlock_irqrestore(&xhci->lock, flags); 3623 3624 for (i = 0; i < num_eps; i++) { 3625 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3626 xhci_debugfs_create_stream_files(xhci, vdev, ep_index); 3627 } 3628 /* Subtract 1 for stream 0, which drivers can't use */ 3629 return num_streams - 1; 3630 3631 cleanup: 3632 /* If it didn't work, free the streams! */ 3633 for (i = 0; i < num_eps; i++) { 3634 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3635 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3636 vdev->eps[ep_index].stream_info = NULL; 3637 /* FIXME Unset maxPstreams in endpoint context and 3638 * update deq ptr to point to normal string ring. 3639 */ 3640 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3641 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3642 xhci_endpoint_zero(xhci, vdev, eps[i]); 3643 } 3644 xhci_free_command(xhci, config_cmd); 3645 return -ENOMEM; 3646 } 3647 3648 /* Transition the endpoint from using streams to being a "normal" endpoint 3649 * without streams. 3650 * 3651 * Modify the endpoint context state, submit a configure endpoint command, 3652 * and free all endpoint rings for streams if that completes successfully. 3653 */ 3654 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, 3655 struct usb_host_endpoint **eps, unsigned int num_eps, 3656 gfp_t mem_flags) 3657 { 3658 int i, ret; 3659 struct xhci_hcd *xhci; 3660 struct xhci_virt_device *vdev; 3661 struct xhci_command *command; 3662 struct xhci_input_control_ctx *ctrl_ctx; 3663 unsigned int ep_index; 3664 unsigned long flags; 3665 u32 changed_ep_bitmask; 3666 3667 xhci = hcd_to_xhci(hcd); 3668 vdev = xhci->devs[udev->slot_id]; 3669 3670 /* Set up a configure endpoint command to remove the streams rings */ 3671 spin_lock_irqsave(&xhci->lock, flags); 3672 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, 3673 udev, eps, num_eps); 3674 if (changed_ep_bitmask == 0) { 3675 spin_unlock_irqrestore(&xhci->lock, flags); 3676 return -EINVAL; 3677 } 3678 3679 /* Use the xhci_command structure from the first endpoint. 
We may have 3680 * allocated too many, but the driver may call xhci_free_streams() for 3681 * each endpoint it grouped into one call to xhci_alloc_streams(). 3682 */ 3683 ep_index = xhci_get_endpoint_index(&eps[0]->desc); 3684 command = vdev->eps[ep_index].stream_info->free_streams_command; 3685 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 3686 if (!ctrl_ctx) { 3687 spin_unlock_irqrestore(&xhci->lock, flags); 3688 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3689 __func__); 3690 return -EINVAL; 3691 } 3692 3693 for (i = 0; i < num_eps; i++) { 3694 struct xhci_ep_ctx *ep_ctx; 3695 3696 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3697 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 3698 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= 3699 EP_GETTING_NO_STREAMS; 3700 3701 xhci_endpoint_copy(xhci, command->in_ctx, 3702 vdev->out_ctx, ep_index); 3703 xhci_setup_no_streams_ep_input_ctx(ep_ctx, 3704 &vdev->eps[ep_index]); 3705 } 3706 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, 3707 vdev->out_ctx, ctrl_ctx, 3708 changed_ep_bitmask, changed_ep_bitmask); 3709 spin_unlock_irqrestore(&xhci->lock, flags); 3710 3711 /* Issue and wait for the configure endpoint command, 3712 * which must succeed. 3713 */ 3714 ret = xhci_configure_endpoint(xhci, udev, command, 3715 false, true); 3716 3717 /* xHC rejected the configure endpoint command for some reason, so we 3718 * leave the streams rings intact. 3719 */ 3720 if (ret < 0) 3721 return ret; 3722 3723 spin_lock_irqsave(&xhci->lock, flags); 3724 for (i = 0; i < num_eps; i++) { 3725 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3726 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3727 vdev->eps[ep_index].stream_info = NULL; 3728 /* FIXME Unset maxPstreams in endpoint context and 3729 * update deq ptr to point to normal string ring. 3730 */ 3731 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; 3732 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3733 } 3734 spin_unlock_irqrestore(&xhci->lock, flags); 3735 3736 return 0; 3737 } 3738 3739 /* 3740 * Deletes endpoint resources for endpoints that were active before a Reset 3741 * Device command, or a Disable Slot command. The Reset Device command leaves 3742 * the control endpoint intact, whereas the Disable Slot command deletes it. 3743 * 3744 * Must be called with xhci->lock held. 3745 */ 3746 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, 3747 struct xhci_virt_device *virt_dev, bool drop_control_ep) 3748 { 3749 int i; 3750 unsigned int num_dropped_eps = 0; 3751 unsigned int drop_flags = 0; 3752 3753 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { 3754 if (virt_dev->eps[i].ring) { 3755 drop_flags |= 1 << i; 3756 num_dropped_eps++; 3757 } 3758 } 3759 xhci->num_active_eps -= num_dropped_eps; 3760 if (num_dropped_eps) 3761 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3762 "Dropped %u ep ctxs, flags = 0x%x, " 3763 "%u now active.", 3764 num_dropped_eps, drop_flags, 3765 xhci->num_active_eps); 3766 } 3767 3768 /* 3769 * This submits a Reset Device Command, which will set the device state to 0, 3770 * set the device address to 0, and disable all the endpoints except the default 3771 * control endpoint. The USB core should come back and call 3772 * xhci_address_device(), and then re-set up the configuration. If this is 3773 * called because of a usb_reset_and_verify_device(), then the old alternate 3774 * settings will be re-installed through the normal bandwidth allocation 3775 * functions. 
3776 * 3777 * Wait for the Reset Device command to finish. Remove all structures 3778 * associated with the endpoints that were disabled. Clear the input device 3779 * structure? Reset the control endpoint 0 max packet size? 3780 * 3781 * If the virt_dev to be reset does not exist or does not match the udev, 3782 * it means the device is lost, possibly due to the xHC restore error and 3783 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to 3784 * re-allocate the device. 3785 */ 3786 static int xhci_discover_or_reset_device(struct usb_hcd *hcd, 3787 struct usb_device *udev) 3788 { 3789 int ret, i; 3790 unsigned long flags; 3791 struct xhci_hcd *xhci; 3792 unsigned int slot_id; 3793 struct xhci_virt_device *virt_dev; 3794 struct xhci_command *reset_device_cmd; 3795 struct xhci_slot_ctx *slot_ctx; 3796 int old_active_eps = 0; 3797 3798 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 3799 if (ret <= 0) 3800 return ret; 3801 xhci = hcd_to_xhci(hcd); 3802 slot_id = udev->slot_id; 3803 virt_dev = xhci->devs[slot_id]; 3804 if (!virt_dev) { 3805 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3806 "not exist. Re-allocate the device\n", slot_id); 3807 ret = xhci_alloc_dev(hcd, udev); 3808 if (ret == 1) 3809 return 0; 3810 else 3811 return -EINVAL; 3812 } 3813 3814 if (virt_dev->tt_info) 3815 old_active_eps = virt_dev->tt_info->active_eps; 3816 3817 if (virt_dev->udev != udev) { 3818 /* If the virt_dev and the udev does not match, this virt_dev 3819 * may belong to another udev. 3820 * Re-allocate the device. 3821 */ 3822 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3823 "not match the udev. Re-allocate the device\n", 3824 slot_id); 3825 ret = xhci_alloc_dev(hcd, udev); 3826 if (ret == 1) 3827 return 0; 3828 else 3829 return -EINVAL; 3830 } 3831 3832 /* If device is not setup, there is no point in resetting it */ 3833 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3834 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3835 SLOT_STATE_DISABLED) 3836 return 0; 3837 3838 trace_xhci_discover_or_reset_device(slot_ctx); 3839 3840 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 3841 /* Allocate the command structure that holds the struct completion. 3842 * Assume we're in process context, since the normal device reset 3843 * process has to wait for the device anyway. Storage devices are 3844 * reset as part of error handling, so use GFP_NOIO instead of 3845 * GFP_KERNEL. 3846 */ 3847 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); 3848 if (!reset_device_cmd) { 3849 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3850 return -ENOMEM; 3851 } 3852 3853 /* Attempt to submit the Reset Device command to the command ring */ 3854 spin_lock_irqsave(&xhci->lock, flags); 3855 3856 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); 3857 if (ret) { 3858 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3859 spin_unlock_irqrestore(&xhci->lock, flags); 3860 goto command_cleanup; 3861 } 3862 xhci_ring_cmd_db(xhci); 3863 spin_unlock_irqrestore(&xhci->lock, flags); 3864 3865 /* Wait for the Reset Device command to finish */ 3866 wait_for_completion(reset_device_cmd->completion); 3867 3868 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3869 * unless we tried to reset a slot ID that wasn't enabled, 3870 * or the device wasn't in the addressed or configured state. 
3871 */ 3872 ret = reset_device_cmd->status; 3873 switch (ret) { 3874 case COMP_COMMAND_ABORTED: 3875 case COMP_COMMAND_RING_STOPPED: 3876 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3877 ret = -ETIME; 3878 goto command_cleanup; 3879 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ 3880 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ 3881 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", 3882 slot_id, 3883 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3884 xhci_dbg(xhci, "Not freeing device rings.\n"); 3885 /* Don't treat this as an error. May change my mind later. */ 3886 ret = 0; 3887 goto command_cleanup; 3888 case COMP_SUCCESS: 3889 xhci_dbg(xhci, "Successful reset device command.\n"); 3890 break; 3891 default: 3892 if (xhci_is_vendor_info_code(xhci, ret)) 3893 break; 3894 xhci_warn(xhci, "Unknown completion code %u for " 3895 "reset device command.\n", ret); 3896 ret = -EINVAL; 3897 goto command_cleanup; 3898 } 3899 3900 /* Free up host controller endpoint resources */ 3901 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3902 spin_lock_irqsave(&xhci->lock, flags); 3903 /* Don't delete the default control endpoint resources */ 3904 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3905 spin_unlock_irqrestore(&xhci->lock, flags); 3906 } 3907 3908 /* Everything but endpoint 0 is disabled, so free the rings. */ 3909 for (i = 1; i < 31; i++) { 3910 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3911 3912 if (ep->ep_state & EP_HAS_STREAMS) { 3913 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", 3914 xhci_get_endpoint_address(i)); 3915 xhci_free_stream_info(xhci, ep->stream_info); 3916 ep->stream_info = NULL; 3917 ep->ep_state &= ~EP_HAS_STREAMS; 3918 } 3919 3920 if (ep->ring) { 3921 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 3922 xhci_free_endpoint_ring(xhci, virt_dev, i); 3923 } 3924 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3925 xhci_drop_ep_from_interval_table(xhci, 3926 &virt_dev->eps[i].bw_info, 3927 virt_dev->bw_table, 3928 udev, 3929 &virt_dev->eps[i], 3930 virt_dev->tt_info); 3931 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3932 } 3933 /* If necessary, update the number of active TTs on this root port */ 3934 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3935 virt_dev->flags = 0; 3936 ret = 0; 3937 3938 command_cleanup: 3939 xhci_free_command(xhci, reset_device_cmd); 3940 return ret; 3941 } 3942 3943 /* 3944 * At this point, the struct usb_device is about to go away, the device has 3945 * disconnected, and all traffic has been stopped and the endpoints have been 3946 * disabled. Free any HC data structures associated with that device. 3947 */ 3948 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3949 { 3950 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3951 struct xhci_virt_device *virt_dev; 3952 struct xhci_slot_ctx *slot_ctx; 3953 int i, ret; 3954 3955 /* 3956 * We called pm_runtime_get_noresume when the device was attached. 3957 * Decrement the counter here to allow controller to runtime suspend 3958 * if no devices remain. 3959 */ 3960 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3961 pm_runtime_put_noidle(hcd->self.controller); 3962 3963 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3964 /* If the host is halted due to driver unload, we still need to free the 3965 * device. 
3966 */ 3967 if (ret <= 0 && ret != -ENODEV) 3968 return; 3969 3970 virt_dev = xhci->devs[udev->slot_id]; 3971 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3972 trace_xhci_free_dev(slot_ctx); 3973 3974 /* Stop any wayward timer functions (which may grab the lock) */ 3975 for (i = 0; i < 31; i++) { 3976 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; 3977 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3978 } 3979 virt_dev->udev = NULL; 3980 xhci_disable_slot(xhci, udev->slot_id); 3981 xhci_free_virt_device(xhci, udev->slot_id); 3982 } 3983 3984 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) 3985 { 3986 struct xhci_command *command; 3987 unsigned long flags; 3988 u32 state; 3989 int ret; 3990 3991 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 3992 if (!command) 3993 return -ENOMEM; 3994 3995 xhci_debugfs_remove_slot(xhci, slot_id); 3996 3997 spin_lock_irqsave(&xhci->lock, flags); 3998 /* Don't disable the slot if the host controller is dead. */ 3999 state = readl(&xhci->op_regs->status); 4000 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 4001 (xhci->xhc_state & XHCI_STATE_HALTED)) { 4002 spin_unlock_irqrestore(&xhci->lock, flags); 4003 kfree(command); 4004 return -ENODEV; 4005 } 4006 4007 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 4008 slot_id); 4009 if (ret) { 4010 spin_unlock_irqrestore(&xhci->lock, flags); 4011 kfree(command); 4012 return ret; 4013 } 4014 xhci_ring_cmd_db(xhci); 4015 spin_unlock_irqrestore(&xhci->lock, flags); 4016 4017 wait_for_completion(command->completion); 4018 4019 if (command->status != COMP_SUCCESS) 4020 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", 4021 slot_id, command->status); 4022 4023 xhci_free_command(xhci, command); 4024 4025 return 0; 4026 } 4027 4028 /* 4029 * Checks if we have enough host controller resources for the default control 4030 * endpoint. 4031 * 4032 * Must be called with xhci->lock held. 4033 */ 4034 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 4035 { 4036 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 4037 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 4038 "Not enough ep ctxs: " 4039 "%u active, need to add 1, limit is %u.", 4040 xhci->num_active_eps, xhci->limit_active_eps); 4041 return -ENOMEM; 4042 } 4043 xhci->num_active_eps += 1; 4044 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 4045 "Adding 1 ep ctx, %u now active.", 4046 xhci->num_active_eps); 4047 return 0; 4048 } 4049 4050 4051 /* 4052 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 4053 * timed out, or allocating memory failed. Returns 1 on success. 
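 *
 * Callers treat the result as a boolean rather than an errno; for example
 * xhci_discover_or_reset_device() above re-allocates a lost device with:
 *
 *	ret = xhci_alloc_dev(hcd, udev);
 *	if (ret == 1)
 *		return 0;
 *	else
 *		return -EINVAL;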
4054 */ 4055 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 4056 { 4057 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4058 struct xhci_virt_device *vdev; 4059 struct xhci_slot_ctx *slot_ctx; 4060 unsigned long flags; 4061 int ret, slot_id; 4062 struct xhci_command *command; 4063 4064 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 4065 if (!command) 4066 return 0; 4067 4068 spin_lock_irqsave(&xhci->lock, flags); 4069 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 4070 if (ret) { 4071 spin_unlock_irqrestore(&xhci->lock, flags); 4072 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 4073 xhci_free_command(xhci, command); 4074 return 0; 4075 } 4076 xhci_ring_cmd_db(xhci); 4077 spin_unlock_irqrestore(&xhci->lock, flags); 4078 4079 wait_for_completion(command->completion); 4080 slot_id = command->slot_id; 4081 4082 if (!slot_id || command->status != COMP_SUCCESS) { 4083 xhci_err(xhci, "Error while assigning device slot ID\n"); 4084 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 4085 HCS_MAX_SLOTS( 4086 readl(&xhci->cap_regs->hcs_params1))); 4087 xhci_free_command(xhci, command); 4088 return 0; 4089 } 4090 4091 xhci_free_command(xhci, command); 4092 4093 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 4094 spin_lock_irqsave(&xhci->lock, flags); 4095 ret = xhci_reserve_host_control_ep_resources(xhci); 4096 if (ret) { 4097 spin_unlock_irqrestore(&xhci->lock, flags); 4098 xhci_warn(xhci, "Not enough host resources, " 4099 "active endpoint contexts = %u\n", 4100 xhci->num_active_eps); 4101 goto disable_slot; 4102 } 4103 spin_unlock_irqrestore(&xhci->lock, flags); 4104 } 4105 /* Use GFP_NOIO, since this function can be called from 4106 * xhci_discover_or_reset_device(), which may be called as part of 4107 * mass storage driver error handling. 4108 */ 4109 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { 4110 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 4111 goto disable_slot; 4112 } 4113 vdev = xhci->devs[slot_id]; 4114 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); 4115 trace_xhci_alloc_dev(slot_ctx); 4116 4117 udev->slot_id = slot_id; 4118 4119 xhci_debugfs_create_slot(xhci, slot_id); 4120 4121 /* 4122 * If resetting upon resume, we can't put the controller into runtime 4123 * suspend if there is a device attached. 4124 */ 4125 if (xhci->quirks & XHCI_RESET_ON_RESUME) 4126 pm_runtime_get_noresume(hcd->self.controller); 4127 4128 /* Is this a LS or FS device under a HS hub? */ 4129 /* Hub or peripherial? */ 4130 return 1; 4131 4132 disable_slot: 4133 xhci_disable_slot(xhci, udev->slot_id); 4134 xhci_free_virt_device(xhci, udev->slot_id); 4135 4136 return 0; 4137 } 4138 4139 /* 4140 * Issue an Address Device command and optionally send a corresponding 4141 * SetAddress request to the device. 4142 */ 4143 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, 4144 enum xhci_setup_dev setup) 4145 { 4146 const char *act = setup == SETUP_CONTEXT_ONLY ? 
"context" : "address"; 4147 unsigned long flags; 4148 struct xhci_virt_device *virt_dev; 4149 int ret = 0; 4150 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4151 struct xhci_slot_ctx *slot_ctx; 4152 struct xhci_input_control_ctx *ctrl_ctx; 4153 u64 temp_64; 4154 struct xhci_command *command = NULL; 4155 4156 mutex_lock(&xhci->mutex); 4157 4158 if (xhci->xhc_state) { /* dying, removing or halted */ 4159 ret = -ESHUTDOWN; 4160 goto out; 4161 } 4162 4163 if (!udev->slot_id) { 4164 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4165 "Bad Slot ID %d", udev->slot_id); 4166 ret = -EINVAL; 4167 goto out; 4168 } 4169 4170 virt_dev = xhci->devs[udev->slot_id]; 4171 4172 if (WARN_ON(!virt_dev)) { 4173 /* 4174 * In plug/unplug torture test with an NEC controller, 4175 * a zero-dereference was observed once due to virt_dev = 0. 4176 * Print useful debug rather than crash if it is observed again! 4177 */ 4178 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 4179 udev->slot_id); 4180 ret = -EINVAL; 4181 goto out; 4182 } 4183 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 4184 trace_xhci_setup_device_slot(slot_ctx); 4185 4186 if (setup == SETUP_CONTEXT_ONLY) { 4187 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 4188 SLOT_STATE_DEFAULT) { 4189 xhci_dbg(xhci, "Slot already in default state\n"); 4190 goto out; 4191 } 4192 } 4193 4194 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 4195 if (!command) { 4196 ret = -ENOMEM; 4197 goto out; 4198 } 4199 4200 command->in_ctx = virt_dev->in_ctx; 4201 4202 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 4203 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 4204 if (!ctrl_ctx) { 4205 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4206 __func__); 4207 ret = -EINVAL; 4208 goto out; 4209 } 4210 /* 4211 * If this is the first Set Address since device plug-in or 4212 * virt_device realloaction after a resume with an xHCI power loss, 4213 * then set up the slot context. 4214 */ 4215 if (!slot_ctx->dev_info) 4216 xhci_setup_addressable_virt_dev(xhci, udev); 4217 /* Otherwise, update the control endpoint ring enqueue pointer. */ 4218 else 4219 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 4220 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 4221 ctrl_ctx->drop_flags = 0; 4222 4223 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 4224 le32_to_cpu(slot_ctx->dev_info) >> 27); 4225 4226 trace_xhci_address_ctrl_ctx(ctrl_ctx); 4227 spin_lock_irqsave(&xhci->lock, flags); 4228 trace_xhci_setup_device(virt_dev); 4229 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, 4230 udev->slot_id, setup); 4231 if (ret) { 4232 spin_unlock_irqrestore(&xhci->lock, flags); 4233 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4234 "FIXME: allocate a command ring segment"); 4235 goto out; 4236 } 4237 xhci_ring_cmd_db(xhci); 4238 spin_unlock_irqrestore(&xhci->lock, flags); 4239 4240 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 4241 wait_for_completion(command->completion); 4242 4243 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 4244 * the SetAddress() "recovery interval" required by USB and aborting the 4245 * command on a timeout. 
4246 */
4247 switch (command->status) {
4248 case COMP_COMMAND_ABORTED:
4249 case COMP_COMMAND_RING_STOPPED:
4250 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4251 ret = -ETIME;
4252 break;
4253 case COMP_CONTEXT_STATE_ERROR:
4254 case COMP_SLOT_NOT_ENABLED_ERROR:
4255 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4256 act, udev->slot_id);
4257 ret = -EINVAL;
4258 break;
4259 case COMP_USB_TRANSACTION_ERROR:
4260 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4261
4262 mutex_unlock(&xhci->mutex);
4263 ret = xhci_disable_slot(xhci, udev->slot_id);
4264 xhci_free_virt_device(xhci, udev->slot_id);
4265 if (!ret)
4266 xhci_alloc_dev(hcd, udev);
4267 kfree(command->completion);
4268 kfree(command);
4269 return -EPROTO;
4270 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4271 dev_warn(&udev->dev,
4272 "ERROR: Incompatible device for setup %s command\n", act);
4273 ret = -ENODEV;
4274 break;
4275 case COMP_SUCCESS:
4276 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4277 "Successful setup %s command", act);
4278 break;
4279 default:
4280 xhci_err(xhci,
4281 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4282 act, command->status);
4283 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4284 ret = -EINVAL;
4285 break;
4286 }
4287 if (ret)
4288 goto out;
4289 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4290 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4291 "Op regs DCBAA ptr = %#016llx", temp_64);
4292 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4293 "Slot ID %d dcbaa entry @%p = %#016llx",
4294 udev->slot_id,
4295 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4296 (unsigned long long)
4297 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4298 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4299 "Output Context DMA address = %#08llx",
4300 (unsigned long long)virt_dev->out_ctx->dma);
4301 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4302 le32_to_cpu(slot_ctx->dev_info) >> 27);
4303 /*
4304 * USB core uses address 1 for the roothubs, so we add one to the
4305 * address given back to us by the HC.
4306 */
4307 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4308 le32_to_cpu(slot_ctx->dev_info) >> 27);
4309 /* Zero the input context control for later use */
4310 ctrl_ctx->add_flags = 0;
4311 ctrl_ctx->drop_flags = 0;
4312 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4313 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4314
4315 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4316 "Internal device address = %d",
4317 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4318 out:
4319 mutex_unlock(&xhci->mutex);
4320 if (command) {
4321 kfree(command->completion);
4322 kfree(command);
4323 }
4324 return ret;
4325 }
4326
4327 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4328 {
4329 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4330 }
4331
4332 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4333 {
4334 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4335 }
4336
4337 /*
4338 * Translate a roothub port number (1-based, relative to this hcd's roothub)
4339 * into the raw, 1-based port number used to index the controller's whole
4340 * array of PORTSC port status registers: look up the port's hardware port
4341 * index (hw_portnum) and add one.
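 *
 * Worked example (hypothetical port layout): on a host whose four USB2
 * ports occupy hw_portnum 0..3 and whose USB3 ports follow at hw_portnum
 * 4..7, calling this with port1 == 1 on the USB3 roothub returns 4 + 1 = 5.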
4342 */ 4343 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) 4344 { 4345 struct xhci_hub *rhub; 4346 4347 rhub = xhci_get_rhub(hcd); 4348 return rhub->ports[port1 - 1]->hw_portnum + 1; 4349 } 4350 4351 /* 4352 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 4353 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 4354 */ 4355 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, 4356 struct usb_device *udev, u16 max_exit_latency) 4357 { 4358 struct xhci_virt_device *virt_dev; 4359 struct xhci_command *command; 4360 struct xhci_input_control_ctx *ctrl_ctx; 4361 struct xhci_slot_ctx *slot_ctx; 4362 unsigned long flags; 4363 int ret; 4364 4365 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL); 4366 if (!command) 4367 return -ENOMEM; 4368 4369 spin_lock_irqsave(&xhci->lock, flags); 4370 4371 virt_dev = xhci->devs[udev->slot_id]; 4372 4373 /* 4374 * virt_dev might not exists yet if xHC resumed from hibernate (S4) and 4375 * xHC was re-initialized. Exit latency will be set later after 4376 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated 4377 */ 4378 4379 if (!virt_dev || max_exit_latency == virt_dev->current_mel) { 4380 spin_unlock_irqrestore(&xhci->lock, flags); 4381 return 0; 4382 } 4383 4384 /* Attempt to issue an Evaluate Context command to change the MEL. */ 4385 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 4386 if (!ctrl_ctx) { 4387 spin_unlock_irqrestore(&xhci->lock, flags); 4388 xhci_free_command(xhci, command); 4389 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4390 __func__); 4391 return -ENOMEM; 4392 } 4393 4394 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); 4395 spin_unlock_irqrestore(&xhci->lock, flags); 4396 4397 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4398 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 4399 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); 4400 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); 4401 slot_ctx->dev_state = 0; 4402 4403 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 4404 "Set up evaluate context for LPM MEL change."); 4405 4406 /* Issue and wait for the evaluate context command. 
*/ 4407 ret = xhci_configure_endpoint(xhci, udev, command, 4408 true, true); 4409 4410 if (!ret) { 4411 spin_lock_irqsave(&xhci->lock, flags); 4412 virt_dev->current_mel = max_exit_latency; 4413 spin_unlock_irqrestore(&xhci->lock, flags); 4414 } 4415 4416 xhci_free_command(xhci, command); 4417 4418 return ret; 4419 } 4420 4421 #ifdef CONFIG_PM 4422 4423 /* BESL to HIRD Encoding array for USB2 LPM */ 4424 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4425 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 4426 4427 /* Calculate HIRD/BESL for USB2 PORTPMSC*/ 4428 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 4429 struct usb_device *udev) 4430 { 4431 int u2del, besl, besl_host; 4432 int besl_device = 0; 4433 u32 field; 4434 4435 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 4436 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4437 4438 if (field & USB_BESL_SUPPORT) { 4439 for (besl_host = 0; besl_host < 16; besl_host++) { 4440 if (xhci_besl_encoding[besl_host] >= u2del) 4441 break; 4442 } 4443 /* Use baseline BESL value as default */ 4444 if (field & USB_BESL_BASELINE_VALID) 4445 besl_device = USB_GET_BESL_BASELINE(field); 4446 else if (field & USB_BESL_DEEP_VALID) 4447 besl_device = USB_GET_BESL_DEEP(field); 4448 } else { 4449 if (u2del <= 50) 4450 besl_host = 0; 4451 else 4452 besl_host = (u2del - 51) / 75 + 1; 4453 } 4454 4455 besl = besl_host + besl_device; 4456 if (besl > 15) 4457 besl = 15; 4458 4459 return besl; 4460 } 4461 4462 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ 4463 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) 4464 { 4465 u32 field; 4466 int l1; 4467 int besld = 0; 4468 int hirdm = 0; 4469 4470 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4471 4472 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ 4473 l1 = udev->l1_params.timeout / 256; 4474 4475 /* device has preferred BESLD */ 4476 if (field & USB_BESL_DEEP_VALID) { 4477 besld = USB_GET_BESL_DEEP(field); 4478 hirdm = 1; 4479 } 4480 4481 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); 4482 } 4483 4484 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4485 struct usb_device *udev, int enable) 4486 { 4487 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4488 struct xhci_port **ports; 4489 __le32 __iomem *pm_addr, *hlpm_addr; 4490 u32 pm_val, hlpm_val, field; 4491 unsigned int port_num; 4492 unsigned long flags; 4493 int hird, exit_latency; 4494 int ret; 4495 4496 if (xhci->quirks & XHCI_HW_LPM_DISABLE) 4497 return -EPERM; 4498 4499 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || 4500 !udev->lpm_capable) 4501 return -EPERM; 4502 4503 if (!udev->parent || udev->parent->parent || 4504 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4505 return -EPERM; 4506 4507 if (udev->usb2_hw_lpm_capable != 1) 4508 return -EPERM; 4509 4510 spin_lock_irqsave(&xhci->lock, flags); 4511 4512 ports = xhci->usb2_rhub.ports; 4513 port_num = udev->portnum - 1; 4514 pm_addr = ports[port_num]->addr + PORTPMSC; 4515 pm_val = readl(pm_addr); 4516 hlpm_addr = ports[port_num]->addr + PORTHLPMC; 4517 4518 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 4519 enable ? "enable" : "disable", port_num + 1); 4520 4521 if (enable) { 4522 /* Host supports BESL timeout instead of HIRD */ 4523 if (udev->usb2_hw_lpm_besl_capable) { 4524 /* if device doesn't have a preferred BESL value use a 4525 * default one which works with mixed HIRD and BESL 4526 * systems. 
See XHCI_DEFAULT_BESL definition in xhci.h 4527 */ 4528 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4529 if ((field & USB_BESL_SUPPORT) && 4530 (field & USB_BESL_BASELINE_VALID)) 4531 hird = USB_GET_BESL_BASELINE(field); 4532 else 4533 hird = udev->l1_params.besl; 4534 4535 exit_latency = xhci_besl_encoding[hird]; 4536 spin_unlock_irqrestore(&xhci->lock, flags); 4537 4538 ret = xhci_change_max_exit_latency(xhci, udev, 4539 exit_latency); 4540 if (ret < 0) 4541 return ret; 4542 spin_lock_irqsave(&xhci->lock, flags); 4543 4544 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); 4545 writel(hlpm_val, hlpm_addr); 4546 /* flush write */ 4547 readl(hlpm_addr); 4548 } else { 4549 hird = xhci_calculate_hird_besl(xhci, udev); 4550 } 4551 4552 pm_val &= ~PORT_HIRD_MASK; 4553 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); 4554 writel(pm_val, pm_addr); 4555 pm_val = readl(pm_addr); 4556 pm_val |= PORT_HLE; 4557 writel(pm_val, pm_addr); 4558 /* flush write */ 4559 readl(pm_addr); 4560 } else { 4561 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); 4562 writel(pm_val, pm_addr); 4563 /* flush write */ 4564 readl(pm_addr); 4565 if (udev->usb2_hw_lpm_besl_capable) { 4566 spin_unlock_irqrestore(&xhci->lock, flags); 4567 xhci_change_max_exit_latency(xhci, udev, 0); 4568 readl_poll_timeout(ports[port_num]->addr, pm_val, 4569 (pm_val & PORT_PLS_MASK) == XDEV_U0, 4570 100, 10000); 4571 return 0; 4572 } 4573 } 4574 4575 spin_unlock_irqrestore(&xhci->lock, flags); 4576 return 0; 4577 } 4578 4579 /* check if a usb2 port supports a given extened capability protocol 4580 * only USB2 ports extended protocol capability values are cached. 4581 * Return 1 if capability is supported 4582 */ 4583 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, 4584 unsigned capability) 4585 { 4586 u32 port_offset, port_count; 4587 int i; 4588 4589 for (i = 0; i < xhci->num_ext_caps; i++) { 4590 if (xhci->ext_caps[i] & capability) { 4591 /* port offsets starts at 1 */ 4592 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; 4593 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); 4594 if (port >= port_offset && 4595 port < port_offset + port_count) 4596 return 1; 4597 } 4598 } 4599 return 0; 4600 } 4601 4602 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4603 { 4604 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4605 int portnum = udev->portnum - 1; 4606 4607 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) 4608 return 0; 4609 4610 /* we only support lpm for non-hub device connected to root hub yet */ 4611 if (!udev->parent || udev->parent->parent || 4612 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4613 return 0; 4614 4615 if (xhci->hw_lpm_support == 1 && 4616 xhci_check_usb2_port_capability( 4617 xhci, portnum, XHCI_HLC)) { 4618 udev->usb2_hw_lpm_capable = 1; 4619 udev->l1_params.timeout = XHCI_L1_TIMEOUT; 4620 udev->l1_params.besl = XHCI_DEFAULT_BESL; 4621 if (xhci_check_usb2_port_capability(xhci, portnum, 4622 XHCI_BLC)) 4623 udev->usb2_hw_lpm_besl_capable = 1; 4624 } 4625 4626 return 0; 4627 } 4628 4629 /*---------------------- USB 3.0 Link PM functions ------------------------*/ 4630 4631 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ 4632 static unsigned long long xhci_service_interval_to_ns( 4633 struct usb_endpoint_descriptor *desc) 4634 { 4635 return (1ULL << (desc->bInterval - 1)) * 125 * 1000; 4636 } 4637 4638 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, 4639 enum 
usb3_link_state state) 4640 { 4641 unsigned long long sel; 4642 unsigned long long pel; 4643 unsigned int max_sel_pel; 4644 char *state_name; 4645 4646 switch (state) { 4647 case USB3_LPM_U1: 4648 /* Convert SEL and PEL stored in nanoseconds to microseconds */ 4649 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 4650 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 4651 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; 4652 state_name = "U1"; 4653 break; 4654 case USB3_LPM_U2: 4655 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); 4656 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); 4657 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; 4658 state_name = "U2"; 4659 break; 4660 default: 4661 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 4662 __func__); 4663 return USB3_LPM_DISABLED; 4664 } 4665 4666 if (sel <= max_sel_pel && pel <= max_sel_pel) 4667 return USB3_LPM_DEVICE_INITIATED; 4668 4669 if (sel > max_sel_pel) 4670 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4671 "due to long SEL %llu ms\n", 4672 state_name, sel); 4673 else 4674 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4675 "due to long PEL %llu ms\n", 4676 state_name, pel); 4677 return USB3_LPM_DISABLED; 4678 } 4679 4680 /* The U1 timeout should be the maximum of the following values: 4681 * - For control endpoints, U1 system exit latency (SEL) * 3 4682 * - For bulk endpoints, U1 SEL * 5 4683 * - For interrupt endpoints: 4684 * - Notification EPs, U1 SEL * 3 4685 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) 4686 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) 4687 */ 4688 static unsigned long long xhci_calculate_intel_u1_timeout( 4689 struct usb_device *udev, 4690 struct usb_endpoint_descriptor *desc) 4691 { 4692 unsigned long long timeout_ns; 4693 int ep_type; 4694 int intr_type; 4695 4696 ep_type = usb_endpoint_type(desc); 4697 switch (ep_type) { 4698 case USB_ENDPOINT_XFER_CONTROL: 4699 timeout_ns = udev->u1_params.sel * 3; 4700 break; 4701 case USB_ENDPOINT_XFER_BULK: 4702 timeout_ns = udev->u1_params.sel * 5; 4703 break; 4704 case USB_ENDPOINT_XFER_INT: 4705 intr_type = usb_endpoint_interrupt_type(desc); 4706 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { 4707 timeout_ns = udev->u1_params.sel * 3; 4708 break; 4709 } 4710 /* Otherwise the calculation is the same as isoc eps */ 4711 fallthrough; 4712 case USB_ENDPOINT_XFER_ISOC: 4713 timeout_ns = xhci_service_interval_to_ns(desc); 4714 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); 4715 if (timeout_ns < udev->u1_params.sel * 2) 4716 timeout_ns = udev->u1_params.sel * 2; 4717 break; 4718 default: 4719 return 0; 4720 } 4721 4722 return timeout_ns; 4723 } 4724 4725 /* Returns the hub-encoded U1 timeout value. */ 4726 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, 4727 struct usb_device *udev, 4728 struct usb_endpoint_descriptor *desc) 4729 { 4730 unsigned long long timeout_ns; 4731 4732 /* Prevent U1 if service interval is shorter than U1 exit latency */ 4733 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { 4734 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { 4735 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); 4736 return USB3_LPM_DISABLED; 4737 } 4738 } 4739 4740 if (xhci->quirks & XHCI_INTEL_HOST) 4741 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); 4742 else 4743 timeout_ns = udev->u1_params.sel; 4744 4745 /* The U1 timeout is encoded in 1us intervals. 4746 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. 
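 *
 * Worked example (illustrative numbers): on a non-Intel host, a device
 * whose U1 SEL is 45000 ns gives timeout_ns = 45000, which the division
 * below turns into DIV_ROUND_UP_ULL(45000, 1000) = 45, i.e. a 45 us
 * hub-initiated U1 inactivity timeout.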
4747 */ 4748 if (timeout_ns == USB3_LPM_DISABLED) 4749 timeout_ns = 1; 4750 else 4751 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); 4752 4753 /* If the necessary timeout value is bigger than what we can set in the 4754 * USB 3.0 hub, we have to disable hub-initiated U1. 4755 */ 4756 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) 4757 return timeout_ns; 4758 dev_dbg(&udev->dev, "Hub-initiated U1 disabled " 4759 "due to long timeout %llu ms\n", timeout_ns); 4760 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); 4761 } 4762 4763 /* The U2 timeout should be the maximum of: 4764 * - 10 ms (to avoid the bandwidth impact on the scheduler) 4765 * - largest bInterval of any active periodic endpoint (to avoid going 4766 * into lower power link states between intervals). 4767 * - the U2 Exit Latency of the device 4768 */ 4769 static unsigned long long xhci_calculate_intel_u2_timeout( 4770 struct usb_device *udev, 4771 struct usb_endpoint_descriptor *desc) 4772 { 4773 unsigned long long timeout_ns; 4774 unsigned long long u2_del_ns; 4775 4776 timeout_ns = 10 * 1000 * 1000; 4777 4778 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && 4779 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4780 timeout_ns = xhci_service_interval_to_ns(desc); 4781 4782 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; 4783 if (u2_del_ns > timeout_ns) 4784 timeout_ns = u2_del_ns; 4785 4786 return timeout_ns; 4787 } 4788 4789 /* Returns the hub-encoded U2 timeout value. */ 4790 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, 4791 struct usb_device *udev, 4792 struct usb_endpoint_descriptor *desc) 4793 { 4794 unsigned long long timeout_ns; 4795 4796 /* Prevent U2 if service interval is shorter than U2 exit latency */ 4797 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { 4798 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { 4799 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); 4800 return USB3_LPM_DISABLED; 4801 } 4802 } 4803 4804 if (xhci->quirks & XHCI_INTEL_HOST) 4805 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); 4806 else 4807 timeout_ns = udev->u2_params.sel; 4808 4809 /* The U2 timeout is encoded in 256us intervals */ 4810 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4811 /* If the necessary timeout value is bigger than what we can set in the 4812 * USB 3.0 hub, we have to disable hub-initiated U2. 
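 *
 * Worked example (illustrative numbers): the 10 ms Intel floor becomes
 * DIV_ROUND_UP_ULL(10000000, 256 * 1000) = 40 above, i.e. a 40 * 256 us =
 * 10.24 ms hub timeout, which fits; only an unusually large service
 * interval or U2 exit latency pushes the value past USB3_LPM_U2_MAX_TIMEOUT
 * and into the no-hub-LPM fallback below.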
4813 */ 4814 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4815 return timeout_ns; 4816 dev_dbg(&udev->dev, "Hub-initiated U2 disabled " 4817 "due to long timeout %llu ms\n", timeout_ns); 4818 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4819 } 4820 4821 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4822 struct usb_device *udev, 4823 struct usb_endpoint_descriptor *desc, 4824 enum usb3_link_state state, 4825 u16 *timeout) 4826 { 4827 if (state == USB3_LPM_U1) 4828 return xhci_calculate_u1_timeout(xhci, udev, desc); 4829 else if (state == USB3_LPM_U2) 4830 return xhci_calculate_u2_timeout(xhci, udev, desc); 4831 4832 return USB3_LPM_DISABLED; 4833 } 4834 4835 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4836 struct usb_device *udev, 4837 struct usb_endpoint_descriptor *desc, 4838 enum usb3_link_state state, 4839 u16 *timeout) 4840 { 4841 u16 alt_timeout; 4842 4843 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 4844 desc, state, timeout); 4845 4846 /* If we found we can't enable hub-initiated LPM, and 4847 * the U1 or U2 exit latency was too high to allow 4848 * device-initiated LPM as well, then we will disable LPM 4849 * for this device, so stop searching any further. 4850 */ 4851 if (alt_timeout == USB3_LPM_DISABLED) { 4852 *timeout = alt_timeout; 4853 return -E2BIG; 4854 } 4855 if (alt_timeout > *timeout) 4856 *timeout = alt_timeout; 4857 return 0; 4858 } 4859 4860 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 4861 struct usb_device *udev, 4862 struct usb_host_interface *alt, 4863 enum usb3_link_state state, 4864 u16 *timeout) 4865 { 4866 int j; 4867 4868 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 4869 if (xhci_update_timeout_for_endpoint(xhci, udev, 4870 &alt->endpoint[j].desc, state, timeout)) 4871 return -E2BIG; 4872 } 4873 return 0; 4874 } 4875 4876 static int xhci_check_intel_tier_policy(struct usb_device *udev, 4877 enum usb3_link_state state) 4878 { 4879 struct usb_device *parent; 4880 unsigned int num_hubs; 4881 4882 if (state == USB3_LPM_U2) 4883 return 0; 4884 4885 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ 4886 for (parent = udev->parent, num_hubs = 0; parent->parent; 4887 parent = parent->parent) 4888 num_hubs++; 4889 4890 if (num_hubs < 2) 4891 return 0; 4892 4893 dev_dbg(&udev->dev, "Disabling U1 link state for device" 4894 " below second-tier hub.\n"); 4895 dev_dbg(&udev->dev, "Plug device into first-tier hub " 4896 "to decrease power consumption.\n"); 4897 return -E2BIG; 4898 } 4899 4900 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 4901 struct usb_device *udev, 4902 enum usb3_link_state state) 4903 { 4904 if (xhci->quirks & XHCI_INTEL_HOST) 4905 return xhci_check_intel_tier_policy(udev, state); 4906 else 4907 return 0; 4908 } 4909 4910 /* Returns the U1 or U2 timeout that should be enabled. 4911 * If the tier check or timeout setting functions return with a non-zero exit 4912 * code, that means the timeout value has been finalized and we shouldn't look 4913 * at any more endpoints. 
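 *
 * A bound interface driver can also veto hub-initiated LPM entirely. A
 * hedged sketch of what such a (hypothetical) driver would declare:
 *
 *	static struct usb_driver example_driver = {
 *		.name = "example",
 *		.probe = example_probe,
 *		.disconnect = example_disconnect,
 *		.disable_hub_initiated_lpm = 1,
 *	};
 *
 * in which case only the device-initiated timeouts from
 * xhci_get_timeout_no_hub_lpm() remain available for that device.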
4914 */ 4915 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 4916 struct usb_device *udev, enum usb3_link_state state) 4917 { 4918 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4919 struct usb_host_config *config; 4920 char *state_name; 4921 int i; 4922 u16 timeout = USB3_LPM_DISABLED; 4923 4924 if (state == USB3_LPM_U1) 4925 state_name = "U1"; 4926 else if (state == USB3_LPM_U2) 4927 state_name = "U2"; 4928 else { 4929 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 4930 state); 4931 return timeout; 4932 } 4933 4934 if (xhci_check_tier_policy(xhci, udev, state) < 0) 4935 return timeout; 4936 4937 /* Gather some information about the currently installed configuration 4938 * and alternate interface settings. 4939 */ 4940 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 4941 state, &timeout)) 4942 return timeout; 4943 4944 config = udev->actconfig; 4945 if (!config) 4946 return timeout; 4947 4948 for (i = 0; i < config->desc.bNumInterfaces; i++) { 4949 struct usb_driver *driver; 4950 struct usb_interface *intf = config->interface[i]; 4951 4952 if (!intf) 4953 continue; 4954 4955 /* Check if any currently bound drivers want hub-initiated LPM 4956 * disabled. 4957 */ 4958 if (intf->dev.driver) { 4959 driver = to_usb_driver(intf->dev.driver); 4960 if (driver && driver->disable_hub_initiated_lpm) { 4961 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", 4962 state_name, driver->name); 4963 timeout = xhci_get_timeout_no_hub_lpm(udev, 4964 state); 4965 if (timeout == USB3_LPM_DISABLED) 4966 return timeout; 4967 } 4968 } 4969 4970 /* Not sure how this could happen... */ 4971 if (!intf->cur_altsetting) 4972 continue; 4973 4974 if (xhci_update_timeout_for_interface(xhci, udev, 4975 intf->cur_altsetting, 4976 state, &timeout)) 4977 return timeout; 4978 } 4979 return timeout; 4980 } 4981 4982 static int calculate_max_exit_latency(struct usb_device *udev, 4983 enum usb3_link_state state_changed, 4984 u16 hub_encoded_timeout) 4985 { 4986 unsigned long long u1_mel_us = 0; 4987 unsigned long long u2_mel_us = 0; 4988 unsigned long long mel_us = 0; 4989 bool disabling_u1; 4990 bool disabling_u2; 4991 bool enabling_u1; 4992 bool enabling_u2; 4993 4994 disabling_u1 = (state_changed == USB3_LPM_U1 && 4995 hub_encoded_timeout == USB3_LPM_DISABLED); 4996 disabling_u2 = (state_changed == USB3_LPM_U2 && 4997 hub_encoded_timeout == USB3_LPM_DISABLED); 4998 4999 enabling_u1 = (state_changed == USB3_LPM_U1 && 5000 hub_encoded_timeout != USB3_LPM_DISABLED); 5001 enabling_u2 = (state_changed == USB3_LPM_U2 && 5002 hub_encoded_timeout != USB3_LPM_DISABLED); 5003 5004 /* If U1 was already enabled and we're not disabling it, 5005 * or we're going to enable U1, account for the U1 max exit latency. 5006 */ 5007 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 5008 enabling_u1) 5009 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 5010 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 5011 enabling_u2) 5012 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 5013 5014 mel_us = max(u1_mel_us, u2_mel_us); 5015 5016 /* xHCI host controller max exit latency field is only 16 bits wide. */ 5017 if (mel_us > MAX_EXIT) { 5018 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 5019 "is too big.\n", mel_us); 5020 return -E2BIG; 5021 } 5022 return mel_us; 5023 } 5024 5025 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. 
*/ 5026 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 5027 struct usb_device *udev, enum usb3_link_state state) 5028 { 5029 struct xhci_hcd *xhci; 5030 u16 hub_encoded_timeout; 5031 int mel; 5032 int ret; 5033 5034 xhci = hcd_to_xhci(hcd); 5035 /* The LPM timeout values are pretty host-controller specific, so don't 5036 * enable hub-initiated timeouts unless the vendor has provided 5037 * information about their timeout algorithm. 5038 */ 5039 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 5040 !xhci->devs[udev->slot_id]) 5041 return USB3_LPM_DISABLED; 5042 5043 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 5044 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 5045 if (mel < 0) { 5046 /* Max Exit Latency is too big, disable LPM. */ 5047 hub_encoded_timeout = USB3_LPM_DISABLED; 5048 mel = 0; 5049 } 5050 5051 ret = xhci_change_max_exit_latency(xhci, udev, mel); 5052 if (ret) 5053 return ret; 5054 return hub_encoded_timeout; 5055 } 5056 5057 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 5058 struct usb_device *udev, enum usb3_link_state state) 5059 { 5060 struct xhci_hcd *xhci; 5061 u16 mel; 5062 5063 xhci = hcd_to_xhci(hcd); 5064 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 5065 !xhci->devs[udev->slot_id]) 5066 return 0; 5067 5068 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 5069 return xhci_change_max_exit_latency(xhci, udev, mel); 5070 } 5071 #else /* CONFIG_PM */ 5072 5073 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 5074 struct usb_device *udev, int enable) 5075 { 5076 return 0; 5077 } 5078 5079 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 5080 { 5081 return 0; 5082 } 5083 5084 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 5085 struct usb_device *udev, enum usb3_link_state state) 5086 { 5087 return USB3_LPM_DISABLED; 5088 } 5089 5090 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 5091 struct usb_device *udev, enum usb3_link_state state) 5092 { 5093 return 0; 5094 } 5095 #endif /* CONFIG_PM */ 5096 5097 /*-------------------------------------------------------------------------*/ 5098 5099 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 5100 * internal data structures for the device. 
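 *
 * Part of that update is the TT think time programmed below: the USB core
 * stores tt->think_time in nanoseconds (666 ns per 8 FS bit times), so a
 * hub reporting 16 FS bit times has think_time == 1332 and is encoded as
 * (1332 / 666) - 1 = 1, matching the 0..3 encoding documented at that code.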
5101 */
5102 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5103 struct usb_tt *tt, gfp_t mem_flags)
5104 {
5105 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5106 struct xhci_virt_device *vdev;
5107 struct xhci_command *config_cmd;
5108 struct xhci_input_control_ctx *ctrl_ctx;
5109 struct xhci_slot_ctx *slot_ctx;
5110 unsigned long flags;
5111 unsigned think_time;
5112 int ret;
5113
5114 /* Ignore root hubs */
5115 if (!hdev->parent)
5116 return 0;
5117
5118 vdev = xhci->devs[hdev->slot_id];
5119 if (!vdev) {
5120 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5121 return -EINVAL;
5122 }
5123
5124 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5125 if (!config_cmd)
5126 return -ENOMEM;
5127
5128 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5129 if (!ctrl_ctx) {
5130 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5131 __func__);
5132 xhci_free_command(xhci, config_cmd);
5133 return -ENOMEM;
5134 }
5135
5136 spin_lock_irqsave(&xhci->lock, flags);
5137 if (hdev->speed == USB_SPEED_HIGH &&
5138 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5139 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5140 xhci_free_command(xhci, config_cmd);
5141 spin_unlock_irqrestore(&xhci->lock, flags);
5142 return -ENOMEM;
5143 }
5144
5145 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5146 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5147 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5148 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5149 /*
5150 * Refer to section 6.2.2: MTT should be 0 for a full speed hub,
5151 * but it may already have been set to 1 when the xHCI virtual
5152 * device was set up, so clear it anyway.
5153 */
5154 if (tt->multi)
5155 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5156 else if (hdev->speed == USB_SPEED_FULL)
5157 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5158
5159 if (xhci->hci_version > 0x95) {
5160 xhci_dbg(xhci, "xHCI version %x needs hub "
5161 "TT think time and number of ports\n",
5162 (unsigned int) xhci->hci_version);
5163 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5164 /* Set TT think time - convert from ns to FS bit times.
5165 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5166 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5167 *
5168 * xHCI 1.0: this field shall be 0 if the device is not a
5169 * High-speed hub.
5170 */
5171 think_time = tt->think_time;
5172 if (think_time != 0)
5173 think_time = (think_time / 666) - 1;
5174 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5175 slot_ctx->tt_info |=
5176 cpu_to_le32(TT_THINK_TIME(think_time));
5177 } else {
5178 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5179 "TT think time or number of ports\n",
5180 (unsigned int) xhci->hci_version);
5181 }
5182 slot_ctx->dev_state = 0;
5183 spin_unlock_irqrestore(&xhci->lock, flags);
5184
5185 xhci_dbg(xhci, "Set up %s for hub device.\n",
5186 (xhci->hci_version > 0x95) ?
5187 "configure endpoint" : "evaluate context");
5188
5189 /* Issue and wait for the configure endpoint or
5190 * evaluate context command.
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd		*xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device		*dev = hcd->self.sysdev;
	unsigned int		minor_rev;
	int			retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* Support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* xHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		xhci->usb2_rhub.hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * The USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub), as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/*
		 * The early xHCI 1.1 spec did not mention that USB 3.1 capable
		 * hosts should return 0x31 for sbrn, or that the minor revision
		 * is a two-digit BCD containing minor and sub-minor numbers.
		 * This was later clarified in xHCI 1.2.
		 *
		 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and the
		 * minor revision set to 0x1 instead of 0x10.
		 */
		if (xhci->usb3_rhub.min_rev == 0x1)
			minor_rev = 1;
		else
			minor_rev = xhci->usb3_rhub.min_rev / 0x10;

		switch (minor_rev) {
		case 2:
			hcd->speed = HCD_USB32;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
			hcd->self.root_hub->rx_lanes = 2;
			hcd->self.root_hub->tx_lanes = 2;
			hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
			break;
		case 1:
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
			hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
			break;
		}
		xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
			  minor_rev,
			  minor_rev ? "Enhanced " : "");

		xhci->usb3_rhub.hcd = hcd;
		/* The xHCI private pointer was set in xhci_pci_probe for the
		 * second registered roothub.
		 */
		return 0;
	}
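
	/*
	 * Illustrative decoding of the switch above (no extra driver logic):
	 * a compliant USB 3.1 host reports usb3_rhub.min_rev == 0x10, so
	 * minor_rev = 0x10 / 0x10 = 1 and the roothub is registered as
	 * HCD_USB31; a host with the early-spec quirk reports 0x01 and is
	 * special-cased to the same result; a host reporting 0x20 decodes to
	 * minor_rev = 2 and gets the dual-lane HCD_USB32 setup.
	 */
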
	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	xhci->quirks |= quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk causes such
	 * spurious events to be ignored.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1 even though the xHC does not actually
	 * support 64-bit address pointers.  For those controllers, clear the
	 * AC64 bit of xhci->hcc_params so that xhci_gen_setup() falls back to
	 * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64 bits,
	 * if the xHC supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This avoids an error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}
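
	/*
	 * Illustrative sketch (not part of this file): the get_quirks()
	 * callback passed to xhci_gen_setup() is how bus glue can steer the
	 * DMA mask selection above.  A hypothetical platform glue driver
	 * whose controller only wires up 32 address bits might do:
	 *
	 *	static void my_plat_quirks(struct device *dev,
	 *				   struct xhci_hcd *xhci)
	 *	{
	 *		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
	 *	}
	 *
	 * That quirk masks the AC64 bit earlier in this function, so the
	 * HCC_64BIT_ADDR() test above fails and the 32-bit dma_set_mask()
	 * path is taken.
	 */
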
	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
				HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma =	xhci_map_urb_for_dma,
	.unmap_urb_for_dma =	xhci_unmap_urb_for_dma,
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_disable =	xhci_endpoint_disable,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
	.get_resuming_ports =	xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
};
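
/*
 * Illustrative sketch (not part of this file): bus glue layers such as the
 * PCI and platform drivers tailor the generic table above through
 * xhci_init_driver().  A hypothetical glue module might do, at init time:
 *
 *	static struct hc_driver my_xhci_hc_driver;		// hypothetical
 *
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.extra_priv_size = sizeof(struct my_priv),	// hypothetical type
 *		.reset = my_setup,				// hypothetical hcd reset callback
 *	};
 *
 *	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
 *
 * Only the callbacks that are non-NULL in the overrides replace the
 * defaults copied from xhci_hc_driver (see xhci_init_driver() below).
 */
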
void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
		if (over->add_endpoint)
			drv->add_endpoint = over->add_endpoint;
		if (over->drop_endpoint)
			drv->drop_endpoint = over->drop_endpoint;
		if (over->check_bandwidth)
			drv->check_bandwidth = over->check_bandwidth;
		if (over->reset_bandwidth)
			drv->reset_bandwidth = over->reset_bandwidth;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();
	xhci_dbc_init();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
	xhci_dbc_exit();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);