/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = readl(ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}
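
/*
 * A typical use, as in xhci_halt() below, polls an operational register
 * until the controller reaches the expected state (illustrative only):
 *
 *      ret = xhci_handshake(&xhci->op_regs->status,
 *                      STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *
 * which waits up to XHCI_MAX_HALT_USEC for the HCHalted bit to be set.
 */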

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = readl(&xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = readl(&xhci->op_regs->command);
        cmd &= mask;
        writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
        xhci_quiesce(xhci);

        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (!ret) {
                xhci->xhc_state |= XHCI_STATE_HALTED;
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        } else
                xhci_warn(xhci, "Host not halted after %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
                        temp);
        writel(temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
                xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);

        return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;
        int ret, i;

        state = readl(&xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
        command = readl(&xhci->op_regs->command);
        command |= CMD_RESET;
        writel(command, &xhci->op_regs->command);

        /* Existing Intel xHCI controllers require a delay of 1 ms
         * after setting the CMD_RESET bit, and before accessing any
         * HC registers.  This allows the HC to complete the
         * reset operation and be ready for HC register access.
         * Without this delay, the subsequent HC register access
         * may very rarely result in a system hang.
         */
        if (xhci->quirks & XHCI_INTEL_HOST)
                udelay(1000);

        ret = xhci_handshake(&xhci->op_regs->command,
                        CMD_RESET, 0, 10 * 1000 * 1000);
        if (ret)
                return ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Wait for controller to be ready for doorbell rings");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_CNR, 0, 10 * 1000 * 1000);

        for (i = 0; i < 2; ++i) {
                xhci->bus_state[i].port_c_suspend = 0;
                xhci->bus_state[i].suspended_ports = 0;
                xhci->bus_state[i].resuming_ports = 0;
        }

        return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
        int i;

        if (!xhci->msix_entries)
                return -EINVAL;

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,
                                        xhci_to_hcd(xhci));
        return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "failed to allocate MSI entry");
                return ret;
        }

        ret = request_irq(pdev->irq, xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "disable MSI interrupt");
                pci_disable_msi(pdev);
        }

        return ret;
}

/*
 * Free IRQs
 * free all requested IRQs
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq > 0)
                return;

        ret = xhci_free_msi(xhci);
        if (!ret)
                return;
        if (pdev->irq > 0)
                free_irq(pdev->irq, xhci_to_hcd(xhci));

        return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int i, ret = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors to use:
         * - HCS_MAX_INTRS: the maximum number of interrupters the host
         *   supports, from the xHCI HCSPARAMS1 register.
         * - num_online_cpus: one MSI-X vector per online CPU, plus one
         *   additional vector so an interrupt is always available.
         */
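        /*
         * For example (illustrative only): with 4 online CPUs and
         * HCS_MAX_INTRS(hcs_params1) == 8, msix_count = min(4 + 1, 8) = 5.
         */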
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

        xhci->msix_entries =
                kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
                        GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                xhci->msix_entries[i].entry = i;
                xhci->msix_entries[i].vector = 0;
        }

        ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "Failed to enable MSI-X");
                goto free_entries;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
                                xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
        }

        hcd->msix_enabled = 1;
        return ret;

disable_msix:
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
        xhci_free_irq(xhci);
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        if (xhci->quirks & XHCI_PLAT)
                return;

        xhci_free_irq(xhci);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);
        }

        hcd->msix_enabled = 0;
        return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
        int i;

        if (xhci->msix_entries) {
                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(xhci->msix_entries[i].vector);
        }
}
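
/*
 * Interrupt setup tries MSI-X first, then falls back to MSI, and finally
 * to the legacy INTx line provided by the PCI core.
 */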
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev;
        int ret;

        /* The xhci platform device has set up IRQs through usb_add_hcd. */
        if (xhci->quirks & XHCI_PLAT)
                return 0;

        pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                goto legacy_irq;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to MSI */
                ret = xhci_setup_msi(xhci);

        if (!ret)
                /* hcd->irq is 0, we have MSI */
                return 0;

        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }

legacy_irq:
        if (!strlen(hcd->irq_descr))
                snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
                                hcd->driver->description, hcd->self.busnum);

        /* fall back to legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n",
                                pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        u32 temp;
        int i;

        xhci = (struct xhci_hcd *)arg;

        for (i = 0; i < xhci->num_usb3_ports; i++) {
                temp = readl(xhci->usb3_ports[i]);
                if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
                        /*
                         * Compliance Mode Detected. Letting USB Core
                         * handle the Warm Reset
                         */
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Compliance mode detected->port %d",
                                        i + 1);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Attempting compliance mode recovery");
                        hcd = xhci->shared_hcd;

                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);

                        usb_hcd_poll_rh_status(hcd);
                }
        }

        if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
                mod_timer(&xhci->comp_mode_recovery_timer,
                        jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode.  The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers the port by issuing
 * a Warm Reset if compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
        xhci->port_status_u0 = 0;
        setup_timer(&xhci->comp_mode_recovery_timer,
                    compliance_mode_recovery, (unsigned long)xhci);
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

        set_timer_slack(&xhci->comp_mode_recovery_timer,
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                        "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
        const char *dmi_product_name, *dmi_sys_vendor;

        dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
        dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!dmi_product_name || !dmi_sys_vendor)
                return false;

        if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
                return false;

        if (strstr(dmi_product_name, "Z420") ||
                        strstr(dmi_product_name, "Z620") ||
                        strstr(dmi_product_name, "Z820") ||
                        strstr(dmi_product_name, "Z1 Workstation"))
                return true;

        return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
        return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "QUIRK: Not clearing Link TRB chain bits.");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "xHCI doesn't need link TRB QUIRK");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

        /* Initialize compliance mode recovery data if needed */
        if (xhci_compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }

        return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                return -ENODEV;
        }
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB3 roothub");
        return 0;
}
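
/*
 * xhci_run() is called once per roothub.  The primary (USB2) HCD performs
 * the interrupter and command ring setup below; the shared (USB3) HCD is
 * added second and actually starts the controller via xhci_run_finished()
 * above.
 */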
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0 roothub
         * is setup.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

        ret = xhci_try_enable_msi(hcd);
        if (ret)
                return ret;

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Set the interrupt modulation register");
        temp = readl(&xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        /*
         * On MediaTek controllers the interrupt moderation interval counts
         * in units 8 times larger than the xHCI spec defines, so use a
         * correspondingly smaller value there.
         */
        temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
        writel(temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Enable interrupts, cmd = 0x%x.", temp);
        writel(temp, &xhci->op_regs->command);

        temp = readl(&xhci->ir_set->irq_pending);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        if (xhci->quirks & XHCI_NEC_HOST) {
                struct xhci_command *command;

                command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
                if (!command)
                        return -ENOMEM;
                xhci_queue_vendor_command(xhci, command, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB2 roothub");
        return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return;

        mutex_lock(&xhci->mutex);
        spin_lock_irq(&xhci->lock);
        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

        /* Make sure the xHC is halted for a USB3 roothub
         * (xhci_stop() could be called as part of failed init).
         */
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        /* Delete the compliance mode recovery timer */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "%s: compliance mode recovery timer deleted",
                                __func__);
        }

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Disabling event ring interrupts");
        temp = readl(&xhci->op_regs->status);
        writel(temp & ~STS_EINT, &xhci->op_regs->status);
        temp = readl(&xhci->ir_set->irq_pending);
        writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
        xhci_mem_cleanup(xhci);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_stop completed - status = %x",
                        readl(&xhci->op_regs->status));
        mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB2 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        /* Workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        readl(&xhci->op_regs->status));

        /* Yet another workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        xhci->s3.command = readl(&xhci->op_regs->command);
        xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
        xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
        xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        writel(xhci->s3.command, &xhci->op_regs->command);
        writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
        writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
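
/*
 * Both register save/restore helpers above run with xhci->lock held and the
 * controller halted, from xhci_suspend() and xhci_resume() respectively.
 */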

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                       xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                 xhci->cmd_ring->cycle_state;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Setting command ring address to 0x%llx",
                        (long unsigned long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
                memset(seg->trbs, 0,
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
        int port_index;
        __le32 __iomem **port_array;
        unsigned long flags;
        u32 t1, t2;

        spin_lock_irqsave(&xhci->lock, flags);

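        /*
         * xhci_port_state_to_neutral() masks out the write-1-to-clear change
         * bits below, so writing the value back does not accidentally
         * acknowledge pending port status change events.
         */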
        /* disable wake bits on the USB3 ports */
        port_index = xhci->num_usb3_ports;
        port_array = xhci->usb3_ports;
        while (port_index--) {
                t1 = readl(port_array[port_index]);
                t1 = xhci_port_state_to_neutral(t1);
                t2 = t1 & ~PORT_WAKE_BITS;
                if (t1 != t2)
                        writel(t2, port_array[port_index]);
        }

        /* disable wake bits on the USB2 ports */
        port_index = xhci->num_usb2_ports;
        port_array = xhci->usb2_ports;
        while (port_index--) {
                t1 = readl(port_array[port_index]);
                t1 = xhci_port_state_to_neutral(t1);
                t2 = t1 & ~PORT_WAKE_BITS;
                if (t1 != t2)
                        writel(t2, port_array[port_index]);
        }

        spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
        int rc = 0;
        unsigned int delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;

        if (!hcd->state)
                return 0;

        if (hcd->state != HC_STATE_SUSPENDED ||
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;

        /* Clear root port wake on bits if wakeup not allowed. */
        if (!do_wakeup)
                xhci_disable_port_wake_on_bits(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);
        clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        del_timer_sync(&xhci->shared_hcd->rh_timer);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped, assuming port suspend has already stopped the endpoints */

        /* step 2: clear Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command &= ~CMD_RUN;
        writel(command, &xhci->op_regs->command);

        /* Some chips from Fresco Logic need an extraordinary delay */
        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

        if (xhci_handshake(&xhci->op_regs->status,
                      STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
        if (xhci_handshake(&xhci->op_regs->status,
                                STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        spin_unlock_irq(&xhci->lock);

        /*
         * Delete the compliance mode recovery timer because the xHCI host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "%s: compliance mode recovery timer deleted",
                                __func__);
        }

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
        u32 command, temp = 0, status;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct usb_hcd *secondary_hcd;
        int retval = 0;
        bool comp_timer_running = false;

        if (!hcd->state)
                return 0;

        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
         */
        if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
                        time_before(jiffies,
                                xhci->bus_state[1].next_statechange))
                msleep(100);

        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;

        if (!hibernated) {
                /* step 1: restore registers */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
                xhci_set_cmd_ring_deq(xhci);
                /* step 3: restore state and start state (set the CRS flag) */
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
                if (xhci_handshake(&xhci->op_regs->status,
                              STS_RESTORE, 0, 10 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
                temp = readl(&xhci->op_regs->status);
        }

        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {

                if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                                !(xhci_all_ports_seen_u0(xhci))) {
                        del_timer_sync(&xhci->comp_mode_recovery_timer);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Compliance Mode Recovery Timer deleted!");
                }

                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_reset(xhci);
                spin_unlock_irq(&xhci->lock);
                xhci_cleanup_msix(xhci);

                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = readl(&xhci->op_regs->status);
                writel(temp & ~STS_EINT, &xhci->op_regs->status);
                temp = readl(&xhci->ir_set->irq_pending);
                writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
                xhci_print_ir_set(xhci, 0);

                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                            readl(&xhci->op_regs->status));

                /* USB core calls the PCI reinit and start functions twice:
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
                if (!usb_hcd_is_primary_hcd(hcd))
                        secondary_hcd = hcd;
                else
                        secondary_hcd = xhci->shared_hcd;

                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
                comp_timer_running = true;

                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }

        /* step 4: set Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command |= CMD_RUN;
        writel(command, &xhci->op_regs->command);
        xhci_handshake(&xhci->op_regs->status, STS_HALT,
                  0, 250 * 1000);

        /* step 5: walk topology and initialize portsc,
         * portpmsc and portli
         */
        /* this is done in bus_resume */

        /* step 6: restart each of the previously
         * Running endpoints by ringing their doorbells
         */

        spin_unlock_irq(&xhci->lock);

done:
        if (retval == 0) {
                /* Resume root hubs only when there are pending events. */
                status = readl(&xhci->op_regs->status);
                if (status & STS_EINT) {
                        usb_hcd_resume_root_hub(hcd);
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                }
        }

        /*
         * If the system is subject to the quirk, the compliance mode timer
         * always needs to be re-initialized after a system resume, since the
         * ports can suffer the compliance mode issue again.  It doesn't
         * matter whether the ports previously entered U0 before the system
         * was suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
                compliance_mode_recovery_timer_init(xhci);

        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);

        return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif  /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;

        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
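
/*
 * Worked examples of the mapping above:
 *   ep 0x81 (epnum 1, IN):  index = (1 * 2) + 1 - 1 = 2
 *   ep 0x02 (epnum 2, OUT): index = (2 * 2) + 0 - 1 = 3
 *   ep0 (control):          index = 0 * 2 = 0
 */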

/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
        unsigned int number = DIV_ROUND_UP(ep_index, 2);
        unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
        return direction | number;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with address 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func) {
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;

        if (!hcd || (check_ep && !ep) || !udev) {
                pr_debug("xHCI %s called with invalid args\n", func);
                return -EINVAL;
        }
        if (!udev->parent) {
                pr_debug("xHCI %s called for root hub\n", func);
                return 0;
        }

        xhci = hcd_to_xhci(hcd);
        if (check_virt_dev) {
                if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
                        xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
                                        func);
                        return -EINVAL;
                }

                virt_dev = xhci->devs[udev->slot_id];
                if (virt_dev->udev != udev) {
                        xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
                                        func);
                        return -EINVAL;
                }
        }

        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return -ENODEV;

        return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
{
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_command *command;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
        max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Max Packet Size for ep 0 changed.");
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Max packet size in usb_device = %d",
                                max_packet_size);
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Max packet size in xHCI HW = %d",
                                hw_max_packet_size);
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Issuing evaluate context command.");

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */

                command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
                if (!command)
                        return -ENOMEM;

                command->in_ctx = xhci->devs[slot_id]->in_ctx;
                ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
                if (!ctrl_ctx) {
                        xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                        __func__);
                        ret = -ENOMEM;
                        goto command_cleanup;
                }
                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);

                ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, command,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
                kfree(command->completion);
                kfree(command);
        }
        return ret;
}

/*
 * Non-error returns are a promise to giveback() the urb later.
 * We drop ownership so the next owner (or urb unlink) can get it.
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_td *buffer;
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;
        struct urb_priv *urb_priv;
        int size, i;

        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
                                        true, true, __func__) <= 0)
                return -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);

        if (!HCD_HW_ACCESSIBLE(hcd)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                ret = -ESHUTDOWN;
                goto exit;
        }

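        /*
         * Decide how many TDs this URB needs: one per isochronous packet,
         * two for a bulk OUT transfer that must end with a zero-length
         * packet (URB_ZERO_PACKET), and one for everything else.
         */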
        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                size = urb->number_of_packets;
        else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
            urb->transfer_buffer_length > 0 &&
            urb->transfer_flags & URB_ZERO_PACKET &&
            !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
                size = 2;
        else
                size = 1;

        urb_priv = kzalloc(sizeof(struct urb_priv) +
                                  size * sizeof(struct xhci_td *), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
        if (!buffer) {
                kfree(urb_priv);
                return -ENOMEM;
        }

        for (i = 0; i < size; i++) {
                urb_priv->td[i] = buffer;
                buffer++;
        }

        urb_priv->length = size;
        urb_priv->td_cnt = 0;
        urb->hcpriv = urb_priv;

        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb);
                        if (ret < 0) {
                                xhci_urb_free_priv(urb_priv);
                                urb->hcpriv = NULL;
                                return ret;
                        }
                }

                /* We have a spinlock and interrupts disabled, so we must pass
                 * atomic context to this function, which may allocate memory.
                 */
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to using streams.\n");
                        ret = -EINVAL;
                } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_NO_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to not having streams.\n");
                        ret = -EINVAL;
                } else {
                        ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                        slot_id, ep_index);
                }
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        }
exit:
        return ret;
dying:
        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        ret = -ESHUTDOWN;
free_priv:
        xhci_urb_free_priv(urb_priv);
        urb->hcpriv = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
                struct urb *urb)
{
        unsigned int slot_id;
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        stream_id = urb->stream_id;
        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has stream IDs 1 to %u allocated, but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.
 * The HC should pick up where it left off in the TD, unless a Set Transfer
 * Ring Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are three cases:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)
 *     The HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        unsigned long flags;
        int ret, i;
        u32 temp;
        struct xhci_hcd *xhci;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct xhci_command *command;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret || !urb->hcpriv)
                goto done;
        temp = readl(&xhci->op_regs->status);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HW died, freeing TD.");
                urb_priv = urb->hcpriv;
                for (i = urb_priv->td_cnt;
                     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
                     i++) {
                        td = urb_priv->td[i];
                        if (!list_empty(&td->td_list))
                                list_del_init(&td->td_list);
                        if (!list_empty(&td->cancelled_td_list))
                                list_del_init(&td->cancelled_td_list);
                }

                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&xhci->lock, flags);
                usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
                xhci_urb_free_priv(urb_priv);
                return ret;
        }
        if ((xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Ep 0x%x: URB %p to be canceled on non-responsive xHCI host.",
                                urb->ep->desc.bEndpointAddress, urb);
                /* Let the stop endpoint command watchdog timer (which set this
                 * state) finish cleaning up the endpoint TD lists.  We must
                 * have caught it in the middle of dropping a lock and giving
                 * back an URB.
                 */
                goto done;
        }

        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
        if (!ep_ring) {
                ret = -EINVAL;
                goto done;
        }

        urb_priv = urb->hcpriv;
        i = urb_priv->td_cnt;
        if (i < urb_priv->length)
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
                                urb, urb->dev->devpath,
                                urb->ep->desc.bEndpointAddress,
                                (unsigned long long) xhci_trb_virt_to_dma(
                                        urb_priv->td[i]->start_seg,
                                        urb_priv->td[i]->first_trb));

        for (; i < urb_priv->length; i++) {
                td = urb_priv->td[i];
                list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
        }

        /* Queue a stop endpoint command, but only if this is
         * the first cancellation to be handled.
         */
        if (!(ep->ep_state & EP_HALT_PENDING)) {
                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
                if (!command) {
                        ret = -ENOMEM;
                        goto done;
                }
                ep->ep_state |= EP_HALT_PENDING;
                ep->stop_cmds_pending++;
                ep->stop_cmd_timer.expires = jiffies +
                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
                add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
                                         ep_index, 0);
                xhci_ring_cmd_db(xhci);
        }
done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}
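
/*
 * The add/drop endpoint functions below only stage changes in the input
 * context; nothing is sent to the hardware until check_bandwidth() issues
 * the configure endpoint command.  A typical alt setting change therefore
 * looks like (illustrative only):
 *
 *      xhci_drop_endpoint(hcd, udev, old_ep);
 *      xhci_add_endpoint(hcd, udev, new_ep);
 *      xhci_check_bandwidth(hcd, udev);
 */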

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags;
        int ret;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)
                return -ENODEV;

        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        drop_flag = xhci_get_endpoint_flag(&ep->desc);
        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
                                __func__, drop_flag);
                return 0;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                __func__);
                return 0;
        }

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
             cpu_to_le32(EP_STATE_DISABLED)) ||
            le32_to_cpu(ctrl_ctx->drop_flags) &
            xhci_get_endpoint_flag(&ep->desc)) {
                /* Do not warn when called after a usb_device_reset */
                if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
                        xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                                        __func__, ep);
                return 0;
        }

        ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

        ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        if (xhci->quirks & XHCI_MTK_HOST)
                xhci_mtk_drop_ep_quirk(hcd, udev, ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags);
        return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx;
        unsigned int ep_index;
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 added_ctxs;
        u32 new_add_flags, new_drop_flags;
        struct xhci_virt_device *virt_dev;
        int ret = 0;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0) {
                /* So we won't queue a reset ep command for a root hub */
                ep->hcpriv = NULL;
                return ret;
        }
        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)
                return -ENODEV;

        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
                /* FIXME when we have to issue an evaluate endpoint command to
                 * deal with ep0 max packet size changing once we get the
                 * descriptors
                 */
                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
                                __func__, added_ctxs);
                return 0;
        }

        virt_dev = xhci->devs[udev->slot_id];
        in_ctx = virt_dev->in_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                __func__);
                return 0;
        }

        ep_index = xhci_get_endpoint_index(&ep->desc);
        /* If this endpoint is already in use, and the upper layers are trying
         * to add it again without dropping it, reject the addition.
         */
        if (virt_dev->eps[ep_index].ring &&
                        !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
                xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
                                (unsigned int) ep->desc.bEndpointAddress);
                return -EINVAL;
        }

        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
        if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        /*
         * Configuration and alternate setting changes must be done in
         * process context, not interrupt context (or so the documentation
         * for usb_set_interface() and usb_set_configuration() claims).
         */
        if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);
                return -ENOMEM;
        }

        if (xhci->quirks & XHCI_MTK_HOST) {
                ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
                if (ret < 0) {
                        xhci_free_or_cache_endpoint_ring(xhci,
                                        virt_dev, ep_index);
                        return ret;
                }
        }

        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        /* If xhci_endpoint_disable() was called for this endpoint, but the
         * xHC hasn't been notified yet through the check_bandwidth() call,
         * this re-adds a new state for the endpoint from the new endpoint
         * descriptors.  We must drop and re-add this endpoint, so we leave the
         * drop flags alone.
         */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_ENOMEM:
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_EINVAL:
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev,
			 "WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev,
			 "WARN: invalid context state for evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
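	/* Worked example (hypothetical flag values): with
	 * valid_add_flags = 0b0111 (three endpoints added) and
	 * valid_drop_flags = 0b0101 (two of those three also dropped, i.e.
	 * changed), the result is hweight32(0b0111) -
	 * hweight32(0b0111 & 0b0101) = 3 - 2 = 1 truly new endpoint context,
	 * since a changed endpoint does not consume an extra context.
	 */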
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The xHC failed the configure endpoint command for some other reason, so we
 * need to revert the resources that the failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing an LS/FS device under an HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.
 * Instead, we come up with an estimate that is no less than the worst case
 * bandwidth used for any one microframe, but may be an over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
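
/*
 * A small worked example of the loop below (hypothetical numbers): say
 * interval 1 has 5 packets pending and the later intervals are empty.  At
 * i = 1 we can place 5 >> 2 = 1 packet in each of the four scheduling
 * opportunities, so one packet's worth of (overhead + max packet size) is
 * charged and 5 % 4 = 1 packet remains.  At i = 2 the remainder doubles to
 * 2 packets, which still can't be spread across all 8 opportunities
 * (2 >> 3 == 0), so it keeps carrying forward; any leftover after interval
 * 15 is simply charged as if it were scheduled in every microframe.
 */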
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating an LS/FS device under an HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
" 2334 "Proposed: %u, Max: %u\n", 2335 bw_used, max_bandwidth); 2336 return -ENOMEM; 2337 } 2338 } 2339 /* 2340 * Ok, we know we have some packets left over after even-handedly 2341 * scheduling interval 15. We don't know which microframes they will 2342 * fit into, so we over-schedule and say they will be scheduled every 2343 * microframe. 2344 */ 2345 if (packets_remaining > 0) 2346 bw_used += overhead + packet_size; 2347 2348 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2349 unsigned int port_index = virt_dev->real_port - 1; 2350 2351 /* OK, we're manipulating a HS device attached to a 2352 * root port bandwidth domain. Include the number of active TTs 2353 * in the bandwidth used. 2354 */ 2355 bw_used += TT_HS_OVERHEAD * 2356 xhci->rh_bw[port_index].num_active_tts; 2357 } 2358 2359 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2360 "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2361 "Available: %u " "percent", 2362 bw_used, max_bandwidth, bw_reserved, 2363 (max_bandwidth - bw_used - bw_reserved) * 100 / 2364 max_bandwidth); 2365 2366 bw_used += bw_reserved; 2367 if (bw_used > max_bandwidth) { 2368 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2369 bw_used, max_bandwidth); 2370 return -ENOMEM; 2371 } 2372 2373 bw_table->bw_used = bw_used; 2374 return 0; 2375 } 2376 2377 static bool xhci_is_async_ep(unsigned int ep_type) 2378 { 2379 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2380 ep_type != ISOC_IN_EP && 2381 ep_type != INT_IN_EP); 2382 } 2383 2384 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2385 { 2386 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2387 } 2388 2389 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2390 { 2391 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2392 2393 if (ep_bw->ep_interval == 0) 2394 return SS_OVERHEAD_BURST + 2395 (ep_bw->mult * ep_bw->num_packets * 2396 (SS_OVERHEAD + mps)); 2397 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2398 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2399 1 << ep_bw->ep_interval); 2400 2401 } 2402 2403 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2404 struct xhci_bw_info *ep_bw, 2405 struct xhci_interval_bw_table *bw_table, 2406 struct usb_device *udev, 2407 struct xhci_virt_ep *virt_ep, 2408 struct xhci_tt_bw_info *tt_info) 2409 { 2410 struct xhci_interval_bw *interval_bw; 2411 int normalized_interval; 2412 2413 if (xhci_is_async_ep(ep_bw->type)) 2414 return; 2415 2416 if (udev->speed >= USB_SPEED_SUPER) { 2417 if (xhci_is_sync_in_ep(ep_bw->type)) 2418 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2419 xhci_get_ss_bw_consumed(ep_bw); 2420 else 2421 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2422 xhci_get_ss_bw_consumed(ep_bw); 2423 return; 2424 } 2425 2426 /* SuperSpeed endpoints never get added to intervals in the table, so 2427 * this check is only valid for HS/FS/LS devices. 2428 */ 2429 if (list_empty(&virt_ep->bw_endpoint_list)) 2430 return; 2431 /* For LS/FS devices, we need to translate the interval expressed in 2432 * microframes to frames. 
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}

static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed >= USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}

void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;

	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}

static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
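		/* The revert runs the attempt backwards: undo the trial add,
		 * restore the saved bw_info copy, then re-add whatever the
		 * trial drop removed, so the interval table ends up exactly
		 * as it started.
		 */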
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}

/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	unsigned long flags;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;

	if (!command)
		return -EINVAL;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, command,
				command->in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, command,
				command->in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"FIXME allocate a new ring segment");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	wait_for_completion(command->completion);

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev,
				&command->status);
	else
		ret = xhci_evaluate_context_result(xhci, udev,
				&command->status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, ctrl_ctx);
		else
			xhci_finish_resource_reservation(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
		struct xhci_virt_device *vdev, int i)
{
	struct xhci_virt_ep *ep = &vdev->eps[i];

	if (ep->ep_state & EP_HAS_STREAMS) {
		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
				xhci_get_endpoint_address(i));
		xhci_free_stream_info(xhci, ep->stream_info);
		ep->stream_info = NULL;
		ep->ep_state &= ~EP_HAS_STREAMS;
	}
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_command *command;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	command->in_ctx = virt_dev->in_ctx;

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -ENOMEM;
		goto command_cleanup;
	}
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0) {
		ret = 0;
		goto command_cleanup;
	}
	/* Fix up Context Entries field.  Minimum value is EP0 == BIT(1). */
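	/* For example (hypothetical configuration): if the highest valid
	 * context after this command is endpoint context index 4 (its add
	 * flag BIT(4) is set, or eps[3] still has a ring and is not being
	 * dropped), the loop below stores LAST_CTX(4) in the slot context,
	 * telling the xHC that device contexts 1-4 are valid.
	 */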
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	for (i = 31; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
		    || (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}
	xhci_dbg(xhci, "New Input Control Context:\n");
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, command,
			false, false);
	if (ret)
		/* Caller should call reset_bandwidth() */
		goto command_cleanup;

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}
command_cleanup:
	kfree(command->completion);
	kfree(command);

	return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		u32 add_flags, u32 drop_flags)
{
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	in_ctx = xhci->devs[slot_id]->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
			added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		unsigned int ep_index, struct xhci_td *td)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;
	struct usb_device *udev = td->urb->dev;

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Cleaning up stalled endpoint ring");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, td, &deq_state);

	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
		return;

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Queueing new dequeue state");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and
		 * the reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Setting up input context for "
				"configure endpoint command");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/* Called when clearing halted device. The core should have sent the control
 * message to clear the device halt condition. The host side of the halt should
 * already be cleared with a reset endpoint command issued when the STALL tx
 * event was received.
 *
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	/*
	 * We might need to implement the config ep cmd in xhci 4.8.1 note:
	 * The Reset Endpoint Command may only be issued to endpoints in the
	 * Halted state.  If software wishes to reset the Data Toggle or
	 * Sequence Number of an endpoint that isn't in the Halted state, then
	 * software may issue a Configure Endpoint Command with the Drop and
	 * Add bits set for the target endpoint that is in the Stopped state.
	 */

	/* For now just print debug to follow the situation */
	xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
			ep->desc.bEndpointAddress);
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
	    ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers.  xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}

/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams + 1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
		    !(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
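
/*
 * A minimal usage sketch from a driver's perspective (illustrative only;
 * drivers reach this through the USB core's usb_alloc_streams() wrapper, and
 * the endpoint and interface names here are hypothetical):
 *
 *	struct usb_host_endpoint *eps[2] = { cmd_ep, status_ep };
 *	int ret = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *	if (ret > 0)
 *		;	// stream IDs 1..ret are now usable on both endpoints
 */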
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	/* MaxPSASize value 0 (2 streams) means streams are not supported */
	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
			HCC_MAX_PSA(xhci->hcc_params) < 4) {
		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
		return -ENOSYS;
	}

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array.  FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.
Clear the input device 3453 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 3454 * 3455 * If the virt_dev to be reset does not exist or does not match the udev, 3456 * it means the device is lost, possibly due to an xHC restore error and 3457 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to 3458 * re-allocate the device. 3459 */ 3460 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 3461 { 3462 int ret, i; 3463 unsigned long flags; 3464 struct xhci_hcd *xhci; 3465 unsigned int slot_id; 3466 struct xhci_virt_device *virt_dev; 3467 struct xhci_command *reset_device_cmd; 3468 int last_freed_endpoint; 3469 struct xhci_slot_ctx *slot_ctx; 3470 int old_active_eps = 0; 3471 3472 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 3473 if (ret <= 0) 3474 return ret; 3475 xhci = hcd_to_xhci(hcd); 3476 slot_id = udev->slot_id; 3477 virt_dev = xhci->devs[slot_id]; 3478 if (!virt_dev) { 3479 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3480 "not exist. Re-allocate the device\n", slot_id); 3481 ret = xhci_alloc_dev(hcd, udev); 3482 if (ret == 1) 3483 return 0; 3484 else 3485 return -EINVAL; 3486 } 3487 3488 if (virt_dev->tt_info) 3489 old_active_eps = virt_dev->tt_info->active_eps; 3490 3491 if (virt_dev->udev != udev) { 3492 /* If the virt_dev and the udev do not match, this virt_dev 3493 * may belong to another udev. 3494 * Re-allocate the device. 3495 */ 3496 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3497 "not match the udev. Re-allocate the device\n", 3498 slot_id); 3499 ret = xhci_alloc_dev(hcd, udev); 3500 if (ret == 1) 3501 return 0; 3502 else 3503 return -EINVAL; 3504 } 3505 3506 /* If the device is not set up, there is no point in resetting it */ 3507 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3508 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3509 SLOT_STATE_DISABLED) 3510 return 0; 3511 3512 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 3513 /* Allocate the command structure that holds the struct completion. 3514 * Assume we're in process context, since the normal device reset 3515 * process has to wait for the device anyway. Storage devices are 3516 * reset as part of error handling, so use GFP_NOIO instead of 3517 * GFP_KERNEL. 3518 */ 3519 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); 3520 if (!reset_device_cmd) { 3521 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3522 return -ENOMEM; 3523 } 3524 3525 /* Attempt to submit the Reset Device command to the command ring */ 3526 spin_lock_irqsave(&xhci->lock, flags); 3527 3528 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); 3529 if (ret) { 3530 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3531 spin_unlock_irqrestore(&xhci->lock, flags); 3532 goto command_cleanup; 3533 } 3534 xhci_ring_cmd_db(xhci); 3535 spin_unlock_irqrestore(&xhci->lock, flags); 3536 3537 /* Wait for the Reset Device command to finish */ 3538 wait_for_completion(reset_device_cmd->completion); 3539 3540 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3541 * unless we tried to reset a slot ID that wasn't enabled, 3542 * or the device wasn't in the addressed or configured state.
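 * (Those two failure cases map to the COMP_EBADSLT and COMP_CTX_STATE
 * completion codes handled in the switch below.)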
3543 */ 3544 ret = reset_device_cmd->status; 3545 switch (ret) { 3546 case COMP_CMD_ABORT: 3547 case COMP_CMD_STOP: 3548 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3549 ret = -ETIME; 3550 goto command_cleanup; 3551 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 3552 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 3553 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", 3554 slot_id, 3555 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3556 xhci_dbg(xhci, "Not freeing device rings.\n"); 3557 /* Don't treat this as an error. May change my mind later. */ 3558 ret = 0; 3559 goto command_cleanup; 3560 case COMP_SUCCESS: 3561 xhci_dbg(xhci, "Successful reset device command.\n"); 3562 break; 3563 default: 3564 if (xhci_is_vendor_info_code(xhci, ret)) 3565 break; 3566 xhci_warn(xhci, "Unknown completion code %u for " 3567 "reset device command.\n", ret); 3568 ret = -EINVAL; 3569 goto command_cleanup; 3570 } 3571 3572 /* Free up host controller endpoint resources */ 3573 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3574 spin_lock_irqsave(&xhci->lock, flags); 3575 /* Don't delete the default control endpoint resources */ 3576 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3577 spin_unlock_irqrestore(&xhci->lock, flags); 3578 } 3579 3580 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 3581 last_freed_endpoint = 1; 3582 for (i = 1; i < 31; ++i) { 3583 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3584 3585 if (ep->ep_state & EP_HAS_STREAMS) { 3586 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", 3587 xhci_get_endpoint_address(i)); 3588 xhci_free_stream_info(xhci, ep->stream_info); 3589 ep->stream_info = NULL; 3590 ep->ep_state &= ~EP_HAS_STREAMS; 3591 } 3592 3593 if (ep->ring) { 3594 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 3595 last_freed_endpoint = i; 3596 } 3597 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3598 xhci_drop_ep_from_interval_table(xhci, 3599 &virt_dev->eps[i].bw_info, 3600 virt_dev->bw_table, 3601 udev, 3602 &virt_dev->eps[i], 3603 virt_dev->tt_info); 3604 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3605 } 3606 /* If necessary, update the number of active TTs on this root port */ 3607 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3608 3609 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 3610 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 3611 ret = 0; 3612 3613 command_cleanup: 3614 xhci_free_command(xhci, reset_device_cmd); 3615 return ret; 3616 } 3617 3618 /* 3619 * At this point, the struct usb_device is about to go away, the device has 3620 * disconnected, and all traffic has been stopped and the endpoints have been 3621 * disabled. Free any HC data structures associated with that device. 3622 */ 3623 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3624 { 3625 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3626 struct xhci_virt_device *virt_dev; 3627 unsigned long flags; 3628 u32 state; 3629 int i, ret; 3630 struct xhci_command *command; 3631 3632 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3633 if (!command) 3634 return; 3635 3636 #ifndef CONFIG_USB_DEFAULT_PERSIST 3637 /* 3638 * We called pm_runtime_get_noresume when the device was attached. 3639 * Decrement the counter here to allow controller to runtime suspend 3640 * if no devices remain. 
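 * (This balances the pm_runtime_get_noresume() call made in
 * xhci_alloc_dev() for XHCI_RESET_ON_RESUME hosts.)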
3641 */ 3642 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3643 pm_runtime_put_noidle(hcd->self.controller); 3644 #endif 3645 3646 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3647 /* If the host is halted due to driver unload, we still need to free the 3648 * device. 3649 */ 3650 if (ret <= 0 && ret != -ENODEV) { 3651 kfree(command); 3652 return; 3653 } 3654 3655 virt_dev = xhci->devs[udev->slot_id]; 3656 3657 /* Stop any wayward timer functions (which may grab the lock) */ 3658 for (i = 0; i < 31; ++i) { 3659 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; 3660 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3661 } 3662 3663 spin_lock_irqsave(&xhci->lock, flags); 3664 /* Don't disable the slot if the host controller is dead. */ 3665 state = readl(&xhci->op_regs->status); 3666 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3667 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3668 xhci_free_virt_device(xhci, udev->slot_id); 3669 spin_unlock_irqrestore(&xhci->lock, flags); 3670 kfree(command); 3671 return; 3672 } 3673 3674 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 3675 udev->slot_id)) { 3676 spin_unlock_irqrestore(&xhci->lock, flags); 3677 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); kfree(command); 3678 return; 3679 } 3680 xhci_ring_cmd_db(xhci); 3681 spin_unlock_irqrestore(&xhci->lock, flags); 3682 3683 /* 3684 * Event command completion handler will free any data structures 3685 * associated with the slot. XXX Can free sleep? 3686 */ 3687 } 3688 3689 /* 3690 * Checks if we have enough host controller resources for the default control 3691 * endpoint. 3692 * 3693 * Must be called with xhci->lock held. 3694 */ 3695 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3696 { 3697 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3698 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3699 "Not enough ep ctxs: " 3700 "%u active, need to add 1, limit is %u.", 3701 xhci->num_active_eps, xhci->limit_active_eps); 3702 return -ENOMEM; 3703 } 3704 xhci->num_active_eps += 1; 3705 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3706 "Adding 1 ep ctx, %u now active.", 3707 xhci->num_active_eps); 3708 return 0; 3709 } 3710 3711 3712 /* 3713 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3714 * timed out, or allocating memory failed. Returns 1 on success.
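 * The USB core calls this through the hc_driver alloc_dev hook and treats
 * a 0 return as a failed device slot allocation.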
3715 */ 3716 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3717 { 3718 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3719 unsigned long flags; 3720 int ret, slot_id; 3721 struct xhci_command *command; 3722 3723 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3724 if (!command) 3725 return 0; 3726 3727 /* xhci->slot_id and xhci->addr_dev are not thread-safe */ 3728 mutex_lock(&xhci->mutex); 3729 spin_lock_irqsave(&xhci->lock, flags); 3730 command->completion = &xhci->addr_dev; 3731 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 3732 if (ret) { 3733 spin_unlock_irqrestore(&xhci->lock, flags); 3734 mutex_unlock(&xhci->mutex); 3735 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3736 kfree(command); 3737 return 0; 3738 } 3739 xhci_ring_cmd_db(xhci); 3740 spin_unlock_irqrestore(&xhci->lock, flags); 3741 3742 wait_for_completion(command->completion); 3743 slot_id = xhci->slot_id; 3744 mutex_unlock(&xhci->mutex); 3745 3746 if (!slot_id || command->status != COMP_SUCCESS) { 3747 xhci_err(xhci, "Error while assigning device slot ID\n"); 3748 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 3749 HCS_MAX_SLOTS( 3750 readl(&xhci->cap_regs->hcs_params1))); 3751 kfree(command); 3752 return 0; 3753 } 3754 3755 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3756 spin_lock_irqsave(&xhci->lock, flags); 3757 ret = xhci_reserve_host_control_ep_resources(xhci); 3758 if (ret) { 3759 spin_unlock_irqrestore(&xhci->lock, flags); 3760 xhci_warn(xhci, "Not enough host resources, " 3761 "active endpoint contexts = %u\n", 3762 xhci->num_active_eps); 3763 goto disable_slot; 3764 } 3765 spin_unlock_irqrestore(&xhci->lock, flags); 3766 } 3767 /* Use GFP_NOIO, since this function can be called from 3768 * xhci_discover_or_reset_device(), which may be called as part of 3769 * mass storage driver error handling. 3770 */ 3771 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { 3772 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3773 goto disable_slot; 3774 } 3775 udev->slot_id = slot_id; 3776 3777 #ifndef CONFIG_USB_DEFAULT_PERSIST 3778 /* 3779 * If resetting upon resume, we can't put the controller into runtime 3780 * suspend if there is a device attached. 3781 */ 3782 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3783 pm_runtime_get_noresume(hcd->self.controller); 3784 #endif 3785 3786 3787 kfree(command); 3788 /* Is this a LS or FS device under a HS hub? */ 3789 /* Hub or peripheral? */ 3790 return 1; 3791 3792 disable_slot: 3793 /* Disable slot, if we can do it without mem alloc */ 3794 spin_lock_irqsave(&xhci->lock, flags); 3795 command->completion = NULL; 3796 command->status = 0; 3797 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 3798 udev->slot_id)) 3799 xhci_ring_cmd_db(xhci); 3800 spin_unlock_irqrestore(&xhci->lock, flags); 3801 return 0; 3802 } 3803 3804 /* 3805 * Issue an Address Device command and optionally send a corresponding 3806 * SetAddress request to the device. 3807 */ 3808 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, 3809 enum xhci_setup_dev setup) 3810 { 3811 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; 3812 unsigned long flags; 3813 struct xhci_virt_device *virt_dev; 3814 int ret = 0; 3815 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3816 struct xhci_slot_ctx *slot_ctx; 3817 struct xhci_input_control_ctx *ctrl_ctx; 3818 u64 temp_64; 3819 struct xhci_command *command = NULL; 3820 3821 mutex_lock(&xhci->mutex); 3822 3823 if (xhci->xhc_state) /* dying or halted */ 3824 goto out; 3825 3826 if (!udev->slot_id) { 3827 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3828 "Bad Slot ID %d", udev->slot_id); 3829 ret = -EINVAL; 3830 goto out; 3831 } 3832 3833 virt_dev = xhci->devs[udev->slot_id]; 3834 3835 if (WARN_ON(!virt_dev)) { 3836 /* 3837 * In a plug/unplug torture test with an NEC controller, 3838 * a NULL pointer dereference was observed once because virt_dev was NULL. 3839 * Print useful debug info rather than crash if it is observed again! 3840 */ 3841 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3842 udev->slot_id); 3843 ret = -EINVAL; 3844 goto out; 3845 } 3846 3847 if (setup == SETUP_CONTEXT_ONLY) { 3848 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3849 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3850 SLOT_STATE_DEFAULT) { 3851 xhci_dbg(xhci, "Slot already in default state\n"); 3852 goto out; 3853 } 3854 } 3855 3856 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3857 if (!command) { 3858 ret = -ENOMEM; 3859 goto out; 3860 } 3861 3862 command->in_ctx = virt_dev->in_ctx; 3863 command->completion = &xhci->addr_dev; 3864 3865 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3866 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 3867 if (!ctrl_ctx) { 3868 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3869 __func__); 3870 ret = -EINVAL; 3871 goto out; 3872 } 3873 /* 3874 * If this is the first Set Address since device plug-in or 3875 * virt_device reallocation after a resume with an xHCI power loss, 3876 * then set up the slot context. 3877 */ 3878 if (!slot_ctx->dev_info) 3879 xhci_setup_addressable_virt_dev(xhci, udev); 3880 /* Otherwise, update the control endpoint ring enqueue pointer. */ 3881 else 3882 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3883 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 3884 ctrl_ctx->drop_flags = 0; 3885 3886 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3887 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3888 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 3889 le32_to_cpu(slot_ctx->dev_info) >> 27); 3890 3891 spin_lock_irqsave(&xhci->lock, flags); 3892 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, 3893 udev->slot_id, setup); 3894 if (ret) { 3895 spin_unlock_irqrestore(&xhci->lock, flags); 3896 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3897 "FIXME: allocate a command ring segment"); 3898 goto out; 3899 } 3900 xhci_ring_cmd_db(xhci); 3901 spin_unlock_irqrestore(&xhci->lock, flags); 3902 3903 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 3904 wait_for_completion(command->completion); 3905 3906 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 3907 * the SetAddress() "recovery interval" required by USB and aborting the 3908 * command on a timeout." (The USB 2.0 spec, section 9.2.6.3, allows the device a 2 ms recovery interval.)
3909 */ 3910 switch (command->status) { 3911 case COMP_CMD_ABORT: 3912 case COMP_CMD_STOP: 3913 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 3914 ret = -ETIME; 3915 break; 3916 case COMP_CTX_STATE: 3917 case COMP_EBADSLT: 3918 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", 3919 act, udev->slot_id); 3920 ret = -EINVAL; 3921 break; 3922 case COMP_TX_ERR: 3923 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); 3924 ret = -EPROTO; 3925 break; 3926 case COMP_DEV_ERR: 3927 dev_warn(&udev->dev, 3928 "ERROR: Incompatible device for setup %s command\n", act); 3929 ret = -ENODEV; 3930 break; 3931 case COMP_SUCCESS: 3932 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3933 "Successful setup %s command", act); 3934 break; 3935 default: 3936 xhci_err(xhci, 3937 "ERROR: unexpected setup %s command completion code 0x%x.\n", 3938 act, command->status); 3939 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3940 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3941 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); 3942 ret = -EINVAL; 3943 break; 3944 } 3945 if (ret) 3946 goto out; 3947 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 3948 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3949 "Op regs DCBAA ptr = %#016llx", temp_64); 3950 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3951 "Slot ID %d dcbaa entry @%p = %#016llx", 3952 udev->slot_id, 3953 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 3954 (unsigned long long) 3955 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 3956 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3957 "Output Context DMA address = %#08llx", 3958 (unsigned long long)virt_dev->out_ctx->dma); 3959 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3960 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3961 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 3962 le32_to_cpu(slot_ctx->dev_info) >> 27); 3963 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3964 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3965 /* 3966 * USB core uses address 1 for the roothubs, so we add one to the 3967 * address given back to us by the HC. 3968 */ 3969 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3970 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 3971 le32_to_cpu(slot_ctx->dev_info) >> 27); 3972 /* Zero the input context control for later use */ 3973 ctrl_ctx->add_flags = 0; 3974 ctrl_ctx->drop_flags = 0; 3975 3976 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3977 "Internal device address = %d", 3978 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); 3979 out: 3980 mutex_unlock(&xhci->mutex); 3981 kfree(command); 3982 return ret; 3983 } 3984 3985 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 3986 { 3987 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); 3988 } 3989 3990 int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) 3991 { 3992 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); 3993 } 3994 3995 /* 3996 * Translate the port index into the real index in the HW port status 3997 * registers. Calculate the offset between the port's PORTSC register 3998 * and the port status base, then divide by the number of per-port 3999 * registers to get the real index. The raw port number is 1-based.
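 * For example (illustrative numbers): with NUM_PORT_REGS = 4, a port whose
 * PORTSC register sits 8 registers past port_status_base maps to raw port
 * 8 / 4 + 1 = 3.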
4000 */ 4001 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) 4002 { 4003 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4004 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; 4005 __le32 __iomem *addr; 4006 int raw_port; 4007 4008 if (hcd->speed < HCD_USB3) 4009 addr = xhci->usb2_ports[port1 - 1]; 4010 else 4011 addr = xhci->usb3_ports[port1 - 1]; 4012 4013 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1; 4014 return raw_port; 4015 } 4016 4017 /* 4018 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 4019 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 4020 */ 4021 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, 4022 struct usb_device *udev, u16 max_exit_latency) 4023 { 4024 struct xhci_virt_device *virt_dev; 4025 struct xhci_command *command; 4026 struct xhci_input_control_ctx *ctrl_ctx; 4027 struct xhci_slot_ctx *slot_ctx; 4028 unsigned long flags; 4029 int ret; 4030 4031 spin_lock_irqsave(&xhci->lock, flags); 4032 4033 virt_dev = xhci->devs[udev->slot_id]; 4034 4035 /* 4036 * virt_dev might not exists yet if xHC resumed from hibernate (S4) and 4037 * xHC was re-initialized. Exit latency will be set later after 4038 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated 4039 */ 4040 4041 if (!virt_dev || max_exit_latency == virt_dev->current_mel) { 4042 spin_unlock_irqrestore(&xhci->lock, flags); 4043 return 0; 4044 } 4045 4046 /* Attempt to issue an Evaluate Context command to change the MEL. */ 4047 command = xhci->lpm_command; 4048 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 4049 if (!ctrl_ctx) { 4050 spin_unlock_irqrestore(&xhci->lock, flags); 4051 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4052 __func__); 4053 return -ENOMEM; 4054 } 4055 4056 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); 4057 spin_unlock_irqrestore(&xhci->lock, flags); 4058 4059 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4060 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 4061 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); 4062 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); 4063 slot_ctx->dev_state = 0; 4064 4065 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 4066 "Set up evaluate context for LPM MEL change."); 4067 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); 4068 xhci_dbg_ctx(xhci, command->in_ctx, 0); 4069 4070 /* Issue and wait for the evaluate context command. 
*/ 4071 ret = xhci_configure_endpoint(xhci, udev, command, 4072 true, true); 4073 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); 4074 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); 4075 4076 if (!ret) { 4077 spin_lock_irqsave(&xhci->lock, flags); 4078 virt_dev->current_mel = max_exit_latency; 4079 spin_unlock_irqrestore(&xhci->lock, flags); 4080 } 4081 return ret; 4082 } 4083 4084 #ifdef CONFIG_PM 4085 4086 /* BESL to HIRD Encoding array for USB2 LPM */ 4087 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4088 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 4089 4090 /* Calculate HIRD/BESL for USB2 PORTPMSC */ 4091 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 4092 struct usb_device *udev) 4093 { 4094 int u2del, besl, besl_host; 4095 int besl_device = 0; 4096 u32 field; 4097 4098 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 4099 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4100 4101 if (field & USB_BESL_SUPPORT) { 4102 for (besl_host = 0; besl_host < 16; besl_host++) { 4103 if (xhci_besl_encoding[besl_host] >= u2del) 4104 break; 4105 } 4106 /* Use baseline BESL value as default */ 4107 if (field & USB_BESL_BASELINE_VALID) 4108 besl_device = USB_GET_BESL_BASELINE(field); 4109 else if (field & USB_BESL_DEEP_VALID) 4110 besl_device = USB_GET_BESL_DEEP(field); 4111 } else { 4112 if (u2del <= 50) 4113 besl_host = 0; 4114 else 4115 besl_host = (u2del - 51) / 75 + 1; 4116 } 4117 4118 besl = besl_host + besl_device; 4119 if (besl > 15) 4120 besl = 15; 4121 4122 return besl; 4123 } 4124 4125 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ 4126 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) 4127 { 4128 u32 field; 4129 int l1; 4130 int besld = 0; 4131 int hirdm = 0; 4132 4133 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4134 4135 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ 4136 l1 = udev->l1_params.timeout / 256; 4137 4138 /* device has preferred BESLD */ 4139 if (field & USB_BESL_DEEP_VALID) { 4140 besld = USB_GET_BESL_DEEP(field); 4141 hirdm = 1; 4142 } 4143 4144 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); 4145 } 4146 4147 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4148 struct usb_device *udev, int enable) 4149 { 4150 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4151 __le32 __iomem **port_array; 4152 __le32 __iomem *pm_addr, *hlpm_addr; 4153 u32 pm_val, hlpm_val, field; 4154 unsigned int port_num; 4155 unsigned long flags; 4156 int hird, exit_latency; 4157 int ret; 4158 4159 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || 4160 !udev->lpm_capable) 4161 return -EPERM; 4162 4163 if (!udev->parent || udev->parent->parent || 4164 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4165 return -EPERM; 4166 4167 if (udev->usb2_hw_lpm_capable != 1) 4168 return -EPERM; 4169 4170 spin_lock_irqsave(&xhci->lock, flags); 4171 4172 port_array = xhci->usb2_ports; 4173 port_num = udev->portnum - 1; 4174 pm_addr = port_array[port_num] + PORTPMSC; 4175 pm_val = readl(pm_addr); 4176 hlpm_addr = port_array[port_num] + PORTHLPMC; 4177 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4178 4179 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 4180 enable ? "enable" : "disable", port_num + 1); 4181 4182 if (enable) { 4183 /* Host supports BESL timeout instead of HIRD */ 4184 if (udev->usb2_hw_lpm_besl_capable) { 4185 /* if the device doesn't have a preferred BESL value, use a 4186 * default one which works with mixed HIRD and BESL 4187 * systems.
See XHCI_DEFAULT_BESL definition in xhci.h 4188 */ 4189 if ((field & USB_BESL_SUPPORT) && 4190 (field & USB_BESL_BASELINE_VALID)) 4191 hird = USB_GET_BESL_BASELINE(field); 4192 else 4193 hird = udev->l1_params.besl; 4194 4195 exit_latency = xhci_besl_encoding[hird]; 4196 spin_unlock_irqrestore(&xhci->lock, flags); 4197 4198 /* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx 4199 * input context for link power management evaluate 4200 * context commands. It is protected by the hcd->bandwidth 4201 * mutex and is shared by all devices. We need to set 4202 * the max exit latency in USB 2 BESL LPM as well, so 4203 * use the same mutex and xhci_change_max_exit_latency() 4204 */ 4205 mutex_lock(hcd->bandwidth_mutex); 4206 ret = xhci_change_max_exit_latency(xhci, udev, 4207 exit_latency); 4208 mutex_unlock(hcd->bandwidth_mutex); 4209 4210 if (ret < 0) 4211 return ret; 4212 spin_lock_irqsave(&xhci->lock, flags); 4213 4214 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); 4215 writel(hlpm_val, hlpm_addr); 4216 /* flush write */ 4217 readl(hlpm_addr); 4218 } else { 4219 hird = xhci_calculate_hird_besl(xhci, udev); 4220 } 4221 4222 pm_val &= ~PORT_HIRD_MASK; 4223 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); 4224 writel(pm_val, pm_addr); 4225 pm_val = readl(pm_addr); 4226 pm_val |= PORT_HLE; 4227 writel(pm_val, pm_addr); 4228 /* flush write */ 4229 readl(pm_addr); 4230 } else { 4231 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); 4232 writel(pm_val, pm_addr); 4233 /* flush write */ 4234 readl(pm_addr); 4235 if (udev->usb2_hw_lpm_besl_capable) { 4236 spin_unlock_irqrestore(&xhci->lock, flags); 4237 mutex_lock(hcd->bandwidth_mutex); 4238 xhci_change_max_exit_latency(xhci, udev, 0); 4239 mutex_unlock(hcd->bandwidth_mutex); 4240 return 0; 4241 } 4242 } 4243 4244 spin_unlock_irqrestore(&xhci->lock, flags); 4245 return 0; 4246 } 4247 4248 /* Check whether a USB2 port supports a given extended capability protocol. 4249 * Only USB2 port extended protocol capability values are cached.
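 * For example, an extended capability entry with port offset 3 and port
 * count 2 covers zero-based ports 2 and 3.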
4250 * Return 1 if capability is supported 4251 */ 4252 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, 4253 unsigned capability) 4254 { 4255 u32 port_offset, port_count; 4256 int i; 4257 4258 for (i = 0; i < xhci->num_ext_caps; i++) { 4259 if (xhci->ext_caps[i] & capability) { 4260 /* port offsets start at 1 */ 4261 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; 4262 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); 4263 if (port >= port_offset && 4264 port < port_offset + port_count) 4265 return 1; 4266 } 4267 } 4268 return 0; 4269 } 4270 4271 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4272 { 4273 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4274 int portnum = udev->portnum - 1; 4275 4276 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || 4277 !udev->lpm_capable) 4278 return 0; 4279 4280 /* So far we only support LPM for non-hub devices connected directly to the root hub */ 4281 if (!udev->parent || udev->parent->parent || 4282 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4283 return 0; 4284 4285 if (xhci->hw_lpm_support == 1 && 4286 xhci_check_usb2_port_capability( 4287 xhci, portnum, XHCI_HLC)) { 4288 udev->usb2_hw_lpm_capable = 1; 4289 udev->l1_params.timeout = XHCI_L1_TIMEOUT; 4290 udev->l1_params.besl = XHCI_DEFAULT_BESL; 4291 if (xhci_check_usb2_port_capability(xhci, portnum, 4292 XHCI_BLC)) 4293 udev->usb2_hw_lpm_besl_capable = 1; 4294 } 4295 4296 return 0; 4297 } 4298 4299 /*---------------------- USB 3.0 Link PM functions ------------------------*/ 4300 4301 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125 us * 1000 (ns per us) */ 4302 static unsigned long long xhci_service_interval_to_ns( 4303 struct usb_endpoint_descriptor *desc) 4304 { 4305 return (1ULL << (desc->bInterval - 1)) * 125 * 1000; 4306 } 4307 4308 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, 4309 enum usb3_link_state state) 4310 { 4311 unsigned long long sel; 4312 unsigned long long pel; 4313 unsigned int max_sel_pel; 4314 char *state_name; 4315 4316 switch (state) { 4317 case USB3_LPM_U1: 4318 /* Convert SEL and PEL stored in nanoseconds to microseconds */ 4319 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 4320 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 4321 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; 4322 state_name = "U1"; 4323 break; 4324 case USB3_LPM_U2: 4325 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); 4326 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); 4327 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; 4328 state_name = "U2"; 4329 break; 4330 default: 4331 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 4332 __func__); 4333 return USB3_LPM_DISABLED; 4334 } 4335 4336 if (sel <= max_sel_pel && pel <= max_sel_pel) 4337 return USB3_LPM_DEVICE_INITIATED; 4338 4339 if (sel > max_sel_pel) 4340 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4341 "due to long SEL %llu us\n", 4342 state_name, sel); 4343 else 4344 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4345 "due to long PEL %llu us\n", 4346 state_name, pel); 4347 return USB3_LPM_DISABLED; 4348 } 4349 4350 /* The U1 timeout should be the maximum of the following values: 4351 * - For control endpoints, U1 system exit latency (SEL) * 3 4352 * - For bulk endpoints, U1 SEL * 5 4353 * - For interrupt endpoints: 4354 * - Notification EPs, U1 SEL * 3 4355 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) 4356 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) 4357 */ 4358 static unsigned long long xhci_calculate_intel_u1_timeout( 4359 struct
usb_device *udev, 4360 struct usb_endpoint_descriptor *desc) 4361 { 4362 unsigned long long timeout_ns; 4363 int ep_type; 4364 int intr_type; 4365 4366 ep_type = usb_endpoint_type(desc); 4367 switch (ep_type) { 4368 case USB_ENDPOINT_XFER_CONTROL: 4369 timeout_ns = udev->u1_params.sel * 3; 4370 break; 4371 case USB_ENDPOINT_XFER_BULK: 4372 timeout_ns = udev->u1_params.sel * 5; 4373 break; 4374 case USB_ENDPOINT_XFER_INT: 4375 intr_type = usb_endpoint_interrupt_type(desc); 4376 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { 4377 timeout_ns = udev->u1_params.sel * 3; 4378 break; 4379 } 4380 /* Otherwise the calculation is the same as isoc eps */ 4381 case USB_ENDPOINT_XFER_ISOC: 4382 timeout_ns = xhci_service_interval_to_ns(desc); 4383 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); 4384 if (timeout_ns < udev->u1_params.sel * 2) 4385 timeout_ns = udev->u1_params.sel * 2; 4386 break; 4387 default: 4388 return 0; 4389 } 4390 4391 return timeout_ns; 4392 } 4393 4394 /* Returns the hub-encoded U1 timeout value. */ 4395 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, 4396 struct usb_device *udev, 4397 struct usb_endpoint_descriptor *desc) 4398 { 4399 unsigned long long timeout_ns; 4400 4401 if (xhci->quirks & XHCI_INTEL_HOST) 4402 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); 4403 else 4404 timeout_ns = udev->u1_params.sel; 4405 4406 /* The U1 timeout is encoded in 1us intervals. 4407 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. 4408 */ 4409 if (timeout_ns == USB3_LPM_DISABLED) 4410 timeout_ns = 1; 4411 else 4412 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); 4413 4414 /* If the necessary timeout value is bigger than what we can set in the 4415 * USB 3.0 hub, we have to disable hub-initiated U1. 4416 */ 4417 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) 4418 return timeout_ns; 4419 dev_dbg(&udev->dev, "Hub-initiated U1 disabled " 4420 "due to long timeout %llu us\n", timeout_ns); 4421 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); 4422 } 4423 4424 /* The U2 timeout should be the maximum of: 4425 * - 10 ms (to avoid the bandwidth impact on the scheduler) 4426 * - largest bInterval of any active periodic endpoint (to avoid going 4427 * into lower power link states between intervals). 4428 * - the U2 Exit Latency of the device 4429 */ 4430 static unsigned long long xhci_calculate_intel_u2_timeout( 4431 struct usb_device *udev, 4432 struct usb_endpoint_descriptor *desc) 4433 { 4434 unsigned long long timeout_ns; 4435 unsigned long long u2_del_ns; 4436 4437 timeout_ns = 10 * 1000 * 1000; 4438 4439 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && 4440 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4441 timeout_ns = xhci_service_interval_to_ns(desc); 4442 4443 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; 4444 if (u2_del_ns > timeout_ns) 4445 timeout_ns = u2_del_ns; 4446 4447 return timeout_ns; 4448 } 4449 4450 /* Returns the hub-encoded U2 timeout value.
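 The field is in 256 us units: e.g. the Intel 10 ms floor encodes as DIV_ROUND_UP(10000000, 256 * 1000) = 40.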
*/ 4451 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, 4452 struct usb_device *udev, 4453 struct usb_endpoint_descriptor *desc) 4454 { 4455 unsigned long long timeout_ns; 4456 4457 if (xhci->quirks & XHCI_INTEL_HOST) 4458 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); 4459 else 4460 timeout_ns = udev->u2_params.sel; 4461 4462 /* The U2 timeout is encoded in 256us intervals */ 4463 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4464 /* If the necessary timeout value is bigger than what we can set in the 4465 * USB 3.0 hub, we have to disable hub-initiated U2. 4466 */ 4467 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4468 return timeout_ns; 4469 dev_dbg(&udev->dev, "Hub-initiated U2 disabled " 4470 "due to long timeout %llu us\n", timeout_ns * 256); 4471 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4472 } 4473 4474 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4475 struct usb_device *udev, 4476 struct usb_endpoint_descriptor *desc, 4477 enum usb3_link_state state, 4478 u16 *timeout) 4479 { 4480 if (state == USB3_LPM_U1) 4481 return xhci_calculate_u1_timeout(xhci, udev, desc); 4482 else if (state == USB3_LPM_U2) 4483 return xhci_calculate_u2_timeout(xhci, udev, desc); 4484 4485 return USB3_LPM_DISABLED; 4486 } 4487 4488 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4489 struct usb_device *udev, 4490 struct usb_endpoint_descriptor *desc, 4491 enum usb3_link_state state, 4492 u16 *timeout) 4493 { 4494 u16 alt_timeout; 4495 4496 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 4497 desc, state, timeout); 4498 4499 /* If we found we can't enable hub-initiated LPM, or 4500 * the U1 or U2 exit latency was too high to allow 4501 * device-initiated LPM as well, just stop searching. 4502 */ 4503 if (alt_timeout == USB3_LPM_DISABLED || 4504 alt_timeout == USB3_LPM_DEVICE_INITIATED) { 4505 *timeout = alt_timeout; 4506 return -E2BIG; 4507 } 4508 if (alt_timeout > *timeout) 4509 *timeout = alt_timeout; 4510 return 0; 4511 } 4512 4513 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 4514 struct usb_device *udev, 4515 struct usb_host_interface *alt, 4516 enum usb3_link_state state, 4517 u16 *timeout) 4518 { 4519 int j; 4520 4521 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 4522 if (xhci_update_timeout_for_endpoint(xhci, udev, 4523 &alt->endpoint[j].desc, state, timeout)) 4524 return -E2BIG; 4525 4526 } 4527 return 0; 4528 } 4529 4530 static int xhci_check_intel_tier_policy(struct usb_device *udev, 4531 enum usb3_link_state state) 4532 { 4533 struct usb_device *parent; 4534 unsigned int num_hubs; 4535 4536 if (state == USB3_LPM_U2) 4537 return 0; 4538 4539 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ 4540 for (parent = udev->parent, num_hubs = 0; parent->parent; 4541 parent = parent->parent) 4542 num_hubs++; 4543 4544 if (num_hubs < 2) 4545 return 0; 4546 4547 dev_dbg(&udev->dev, "Disabling U1 link state for device" 4548 " below second-tier hub.\n"); 4549 dev_dbg(&udev->dev, "Plug device into first-tier hub " 4550 "to decrease power consumption.\n"); 4551 return -E2BIG; 4552 } 4553 4554 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 4555 struct usb_device *udev, 4556 enum usb3_link_state state) 4557 { 4558 if (xhci->quirks & XHCI_INTEL_HOST) 4559 return xhci_check_intel_tier_policy(udev, state); 4560 else 4561 return 0; 4562 } 4563 4564 /* Returns the U1 or U2 timeout that should be enabled.
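 * It is the maximum across ep0 and every endpoint of the active
 * configuration's current alternate settings.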
4565 * If the tier check or timeout setting functions return with a non-zero exit 4566 * code, that means the timeout value has been finalized and we shouldn't look 4567 * at any more endpoints. 4568 */ 4569 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 4570 struct usb_device *udev, enum usb3_link_state state) 4571 { 4572 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4573 struct usb_host_config *config; 4574 char *state_name; 4575 int i; 4576 u16 timeout = USB3_LPM_DISABLED; 4577 4578 if (state == USB3_LPM_U1) 4579 state_name = "U1"; 4580 else if (state == USB3_LPM_U2) 4581 state_name = "U2"; 4582 else { 4583 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 4584 state); 4585 return timeout; 4586 } 4587 4588 if (xhci_check_tier_policy(xhci, udev, state) < 0) 4589 return timeout; 4590 4591 /* Gather some information about the currently installed configuration 4592 * and alternate interface settings. 4593 */ 4594 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 4595 state, &timeout)) 4596 return timeout; 4597 4598 config = udev->actconfig; 4599 if (!config) 4600 return timeout; 4601 4602 for (i = 0; i < config->desc.bNumInterfaces; i++) { 4603 struct usb_driver *driver; 4604 struct usb_interface *intf = config->interface[i]; 4605 4606 if (!intf) 4607 continue; 4608 4609 /* Check if any currently bound drivers want hub-initiated LPM 4610 * disabled. 4611 */ 4612 if (intf->dev.driver) { 4613 driver = to_usb_driver(intf->dev.driver); 4614 if (driver && driver->disable_hub_initiated_lpm) { 4615 dev_dbg(&udev->dev, "Hub-initiated %s disabled " 4616 "at request of driver %s\n", 4617 state_name, driver->name); 4618 return xhci_get_timeout_no_hub_lpm(udev, state); 4619 } 4620 } 4621 4622 /* Not sure how this could happen... */ 4623 if (!intf->cur_altsetting) 4624 continue; 4625 4626 if (xhci_update_timeout_for_interface(xhci, udev, 4627 intf->cur_altsetting, 4628 state, &timeout)) 4629 return timeout; 4630 } 4631 return timeout; 4632 } 4633 4634 static int calculate_max_exit_latency(struct usb_device *udev, 4635 enum usb3_link_state state_changed, 4636 u16 hub_encoded_timeout) 4637 { 4638 unsigned long long u1_mel_us = 0; 4639 unsigned long long u2_mel_us = 0; 4640 unsigned long long mel_us = 0; 4641 bool disabling_u1; 4642 bool disabling_u2; 4643 bool enabling_u1; 4644 bool enabling_u2; 4645 4646 disabling_u1 = (state_changed == USB3_LPM_U1 && 4647 hub_encoded_timeout == USB3_LPM_DISABLED); 4648 disabling_u2 = (state_changed == USB3_LPM_U2 && 4649 hub_encoded_timeout == USB3_LPM_DISABLED); 4650 4651 enabling_u1 = (state_changed == USB3_LPM_U1 && 4652 hub_encoded_timeout != USB3_LPM_DISABLED); 4653 enabling_u2 = (state_changed == USB3_LPM_U2 && 4654 hub_encoded_timeout != USB3_LPM_DISABLED); 4655 4656 /* If U1 was already enabled and we're not disabling it, 4657 * or we're going to enable U1, account for the U1 max exit latency. 4658 */ 4659 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 4660 enabling_u1) 4661 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 4662 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 4663 enabling_u2) 4664 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 4665 4666 if (u1_mel_us > u2_mel_us) 4667 mel_us = u1_mel_us; 4668 else 4669 mel_us = u2_mel_us; 4670 /* xHCI host controller max exit latency field is only 16 bits wide. 
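 * (MAX_EXIT is 0xffff, so the largest expressible max exit latency is
 * 65535 us.)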
*/ 4671 if (mel_us > MAX_EXIT) { 4672 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 4673 "is too big.\n", mel_us); 4674 return -E2BIG; 4675 } 4676 return mel_us; 4677 } 4678 4679 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ 4680 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4681 struct usb_device *udev, enum usb3_link_state state) 4682 { 4683 struct xhci_hcd *xhci; 4684 u16 hub_encoded_timeout; 4685 int mel; 4686 int ret; 4687 4688 xhci = hcd_to_xhci(hcd); 4689 /* The LPM timeout values are pretty host-controller specific, so don't 4690 * enable hub-initiated timeouts unless the vendor has provided 4691 * information about their timeout algorithm. 4692 */ 4693 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4694 !xhci->devs[udev->slot_id]) 4695 return USB3_LPM_DISABLED; 4696 4697 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 4698 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 4699 if (mel < 0) { 4700 /* Max Exit Latency is too big, disable LPM. */ 4701 hub_encoded_timeout = USB3_LPM_DISABLED; 4702 mel = 0; 4703 } 4704 4705 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4706 if (ret) 4707 return ret; 4708 return hub_encoded_timeout; 4709 } 4710 4711 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4712 struct usb_device *udev, enum usb3_link_state state) 4713 { 4714 struct xhci_hcd *xhci; 4715 u16 mel; 4716 4717 xhci = hcd_to_xhci(hcd); 4718 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4719 !xhci->devs[udev->slot_id]) 4720 return 0; 4721 4722 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 4723 return xhci_change_max_exit_latency(xhci, udev, mel); 4724 } 4725 #else /* CONFIG_PM */ 4726 4727 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4728 struct usb_device *udev, int enable) 4729 { 4730 return 0; 4731 } 4732 4733 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4734 { 4735 return 0; 4736 } 4737 4738 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4739 struct usb_device *udev, enum usb3_link_state state) 4740 { 4741 return USB3_LPM_DISABLED; 4742 } 4743 4744 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4745 struct usb_device *udev, enum usb3_link_state state) 4746 { 4747 return 0; 4748 } 4749 #endif /* CONFIG_PM */ 4750 4751 /*-------------------------------------------------------------------------*/ 4752 4753 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 4754 * internal data structures for the device. 
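 * This fills in the hub-specific slot context fields (DEV_HUB, DEV_MTT,
 * number of ports, TT think time) and issues a Configure Endpoint or
 * Evaluate Context command, depending on the xHCI version.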
4755 */ 4756 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 4757 struct usb_tt *tt, gfp_t mem_flags) 4758 { 4759 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4760 struct xhci_virt_device *vdev; 4761 struct xhci_command *config_cmd; 4762 struct xhci_input_control_ctx *ctrl_ctx; 4763 struct xhci_slot_ctx *slot_ctx; 4764 unsigned long flags; 4765 unsigned think_time; 4766 int ret; 4767 4768 /* Ignore root hubs */ 4769 if (!hdev->parent) 4770 return 0; 4771 4772 vdev = xhci->devs[hdev->slot_id]; 4773 if (!vdev) { 4774 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 4775 return -EINVAL; 4776 } 4777 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 4778 if (!config_cmd) { 4779 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 4780 return -ENOMEM; 4781 } 4782 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 4783 if (!ctrl_ctx) { 4784 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4785 __func__); 4786 xhci_free_command(xhci, config_cmd); 4787 return -ENOMEM; 4788 } 4789 4790 spin_lock_irqsave(&xhci->lock, flags); 4791 if (hdev->speed == USB_SPEED_HIGH && 4792 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 4793 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 4794 xhci_free_command(xhci, config_cmd); 4795 spin_unlock_irqrestore(&xhci->lock, flags); 4796 return -ENOMEM; 4797 } 4798 4799 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 4800 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4801 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 4802 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 4803 /* 4804 * Refer to section 6.2.2: MTT should be 0 for a full-speed hub, 4805 * but it may already be set to 1 when setting up an xHCI virtual 4806 * device, so clear it anyway. 4807 */ 4808 if (tt->multi) 4809 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 4810 else if (hdev->speed == USB_SPEED_FULL) 4811 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); 4812 4813 if (xhci->hci_version > 0x95) { 4814 xhci_dbg(xhci, "xHCI version %x needs hub " 4815 "TT think time and number of ports\n", 4816 (unsigned int) xhci->hci_version); 4817 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 4818 /* Set TT think time - convert from ns to FS bit times. 4819 * 0 = 8 FS bit times, 1 = 16 FS bit times, 4820 * 2 = 24 FS bit times, 3 = 32 FS bit times. 4821 * 4822 * xHCI 1.0: this field shall be 0 if the device is not a 4823 * High-speed hub. 4824 */ 4825 think_time = tt->think_time; 4826 if (think_time != 0) 4827 think_time = (think_time / 666) - 1; 4828 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) 4829 slot_ctx->tt_info |= 4830 cpu_to_le32(TT_THINK_TIME(think_time)); 4831 } else { 4832 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 4833 "TT think time or number of ports\n", 4834 (unsigned int) xhci->hci_version); 4835 } 4836 slot_ctx->dev_state = 0; 4837 spin_unlock_irqrestore(&xhci->lock, flags); 4838 4839 xhci_dbg(xhci, "Set up %s for hub device.\n", 4840 (xhci->hci_version > 0x95) ? 4841 "configure endpoint" : "evaluate context"); 4842 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); 4843 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); 4844 4845 /* Issue and wait for the configure endpoint or 4846 * evaluate context command.
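 * Hosts newer than 0.95 take a Configure Endpoint command here; older
 * hosts use an Evaluate Context command.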
4847 */ 4848 if (xhci->hci_version > 0x95) 4849 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4850 false, false); 4851 else 4852 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4853 true, false); 4854 4855 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); 4856 xhci_dbg_ctx(xhci, vdev->out_ctx, 0); 4857 4858 xhci_free_command(xhci, config_cmd); 4859 return ret; 4860 } 4861 4862 int xhci_get_frame(struct usb_hcd *hcd) 4863 { 4864 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4865 /* EHCI mods by the periodic size. Why? */ 4866 return readl(&xhci->run_regs->microframe_index) >> 3; 4867 } 4868 4869 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) 4870 { 4871 struct xhci_hcd *xhci; 4872 struct device *dev = hcd->self.controller; 4873 int retval; 4874 4875 /* Accept arbitrarily long scatter-gather lists */ 4876 hcd->self.sg_tablesize = ~0; 4877 4878 /* support building packets from discontinuous buffers */ 4879 hcd->self.no_sg_constraint = 1; 4880 4881 /* XHCI controllers don't stop the ep queue on short packets :| */ 4882 hcd->self.no_stop_on_short = 1; 4883 4884 xhci = hcd_to_xhci(hcd); 4885 4886 if (usb_hcd_is_primary_hcd(hcd)) { 4887 xhci->main_hcd = hcd; 4888 /* Mark the first roothub as being USB 2.0. 4889 * The xHCI driver will register the USB 3.0 roothub. 4890 */ 4891 hcd->speed = HCD_USB2; 4892 hcd->self.root_hub->speed = USB_SPEED_HIGH; 4893 /* 4894 * USB 2.0 roothub under xHCI has an integrated TT 4895 * (rate matching hub), as opposed to having an OHCI/UHCI 4896 * companion controller. 4897 */ 4898 hcd->has_tt = 1; 4899 } else { 4900 if (xhci->sbrn == 0x31) { 4901 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); 4902 hcd->speed = HCD_USB31; 4903 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; 4904 } 4905 /* xHCI private pointer was set in xhci_pci_probe for the second 4906 * registered roothub. 4907 */ 4908 return 0; 4909 } 4910 4911 mutex_init(&xhci->mutex); 4912 xhci->cap_regs = hcd->regs; 4913 xhci->op_regs = hcd->regs + 4914 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); 4915 xhci->run_regs = hcd->regs + 4916 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 4917 /* Cache read-only capability registers */ 4918 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); 4919 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); 4920 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); 4921 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); 4922 xhci->hci_version = HC_VERSION(xhci->hcc_params); 4923 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); 4924 if (xhci->hci_version > 0x100) 4925 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); 4926 xhci_print_registers(xhci); 4927 4928 xhci->quirks = quirks; 4929 4930 get_quirks(dev, xhci); 4931 4932 /* xHCI controllers that follow the xHCI 1.0 spec give a spurious 4933 * success event after a short transfer. This quirk ignores such 4934 * spurious events. 4935 */ 4936 if (xhci->hci_version > 0x96) 4937 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 4938 4939 /* Make sure the HC is halted. */ 4940 retval = xhci_halt(xhci); 4941 if (retval) 4942 return retval; 4943 4944 xhci_dbg(xhci, "Resetting HCD\n"); 4945 /* Reset the internal HC memory state and registers.
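 A reset failure here aborts the whole HCD setup.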
*/ 4946 retval = xhci_reset(xhci); 4947 if (retval) 4948 return retval; 4949 xhci_dbg(xhci, "Reset complete\n"); 4950 4951 /* Set dma_mask and coherent_dma_mask to 64-bits, 4952 * if xHC supports 64-bit addressing */ 4953 if (HCC_64BIT_ADDR(xhci->hcc_params) && 4954 !dma_set_mask(dev, DMA_BIT_MASK(64))) { 4955 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4956 dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); 4957 } else { 4958 /* 4959 * This is to avoid error in cases where a 32-bit USB 4960 * controller is used on a 64-bit capable system. 4961 */ 4962 retval = dma_set_mask(dev, DMA_BIT_MASK(32)); 4963 if (retval) 4964 return retval; 4965 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); 4966 dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 4967 } 4968 4969 xhci_dbg(xhci, "Calling HCD init\n"); 4970 /* Initialize HCD and host controller data structures. */ 4971 retval = xhci_init(hcd); 4972 if (retval) 4973 return retval; 4974 xhci_dbg(xhci, "Called HCD init\n"); 4975 4976 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n", 4977 xhci->hcc_params, xhci->hci_version, xhci->quirks); 4978 4979 return 0; 4980 } 4981 EXPORT_SYMBOL_GPL(xhci_gen_setup); 4982 4983 static const struct hc_driver xhci_hc_driver = { 4984 .description = "xhci-hcd", 4985 .product_desc = "xHCI Host Controller", 4986 .hcd_priv_size = sizeof(struct xhci_hcd), 4987 4988 /* 4989 * generic hardware linkage 4990 */ 4991 .irq = xhci_irq, 4992 .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED, 4993 4994 /* 4995 * basic lifecycle operations 4996 */ 4997 .reset = NULL, /* set in xhci_init_driver() */ 4998 .start = xhci_run, 4999 .stop = xhci_stop, 5000 .shutdown = xhci_shutdown, 5001 5002 /* 5003 * managing i/o requests and associated device resources 5004 */ 5005 .urb_enqueue = xhci_urb_enqueue, 5006 .urb_dequeue = xhci_urb_dequeue, 5007 .alloc_dev = xhci_alloc_dev, 5008 .free_dev = xhci_free_dev, 5009 .alloc_streams = xhci_alloc_streams, 5010 .free_streams = xhci_free_streams, 5011 .add_endpoint = xhci_add_endpoint, 5012 .drop_endpoint = xhci_drop_endpoint, 5013 .endpoint_reset = xhci_endpoint_reset, 5014 .check_bandwidth = xhci_check_bandwidth, 5015 .reset_bandwidth = xhci_reset_bandwidth, 5016 .address_device = xhci_address_device, 5017 .enable_device = xhci_enable_device, 5018 .update_hub_device = xhci_update_hub_device, 5019 .reset_device = xhci_discover_or_reset_device, 5020 5021 /* 5022 * scheduling support 5023 */ 5024 .get_frame_number = xhci_get_frame, 5025 5026 /* 5027 * root hub support 5028 */ 5029 .hub_control = xhci_hub_control, 5030 .hub_status_data = xhci_hub_status_data, 5031 .bus_suspend = xhci_bus_suspend, 5032 .bus_resume = xhci_bus_resume, 5033 5034 /* 5035 * call back when device connected and addressed 5036 */ 5037 .update_device = xhci_update_device, 5038 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, 5039 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, 5040 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, 5041 .find_raw_port_number = xhci_find_raw_port_number, 5042 }; 5043 5044 void xhci_init_driver(struct hc_driver *drv, 5045 const struct xhci_driver_overrides *over) 5046 { 5047 BUG_ON(!over); 5048 5049 /* Copy the generic table to drv then apply the overrides */ 5050 *drv = xhci_hc_driver; 5051 5052 if (over) { 5053 drv->hcd_priv_size += over->extra_priv_size; 5054 if (over->reset) 5055 drv->reset = over->reset; 5056 if (over->start) 5057 drv->start = over->start; 5058 } 5059 } 5060 EXPORT_SYMBOL_GPL(xhci_init_driver); 5061 5062 MODULE_DESCRIPTION(DRIVER_DESC); 5063 
MODULE_AUTHOR(DRIVER_AUTHOR); 5064 MODULE_LICENSE("GPL"); 5065 5066 static int __init xhci_hcd_init(void) 5067 { 5068 /* 5069 * Check the compiler generated sizes of structures that must be laid 5070 * out in specific ways for hardware access. 5071 */ 5072 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 5073 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); 5074 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); 5075 /* xhci_device_control has eight fields, and also 5076 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 5077 */ 5078 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 5079 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 5080 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 5081 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); 5082 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); 5083 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ 5084 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 5085 5086 if (usb_disabled()) 5087 return -ENODEV; 5088 5089 return 0; 5090 } 5091 5092 /* 5093 * If an init function is provided, an exit function must also be provided 5094 * to allow module unload. 5095 */ 5096 static void __exit xhci_hcd_fini(void) { } 5097 5098 module_init(xhci_hcd_init); 5099 module_exit(xhci_hcd_fini); 5100