/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
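/*
 * Illustrative use of the handshake convention (a sketch mirroring the
 * call made in xhci_halt() below, shown only as documentation):
 *
 *	ret = xhci_handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *
 * This polls the status register roughly once per microsecond until the
 * STS_HALT bit reads back as set, and gives up with -ETIMEDOUT after
 * XHCI_MAX_HALT_USEC microseconds.
 */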
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = xhci_handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all requested IRQs
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
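/*
 * Note on the vector count used by xhci_setup_msix() below (illustrative
 * arithmetic, not extra driver logic): with, say, 8 online CPUs, the driver
 * asks for min(8 + 1, HCS_MAX_INTRS(hcs_params1)) MSI-X vectors, i.e. one
 * per CPU plus one spare, clamped to the number of interrupters the
 * controller's HCSPARAMS1 register says it supports.
 */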
/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host can
	 *   handle, based on the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core.
	 * Add one additional vector to ensure an interrupt is always
	 * available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
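/*
 * To summarize the interrupt setup above (a sketch of the fallback order,
 * not additional driver logic): xhci_try_enable_msi() first releases any
 * legacy IRQ, then tries MSI-X, then plain MSI, and only if both fail does
 * it fall back to a shared legacy INTx line via usb_hcd_irq().  While MSI
 * or MSI-X is in use, hcd->irq stays 0.
 */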
static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = xhci_readl(xhci, xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected.  Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
					i + 1);
			xhci_dbg(xhci, "Attempting Recovery routine!\n");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver
 * that sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers a port by issuing a Warm
 * Reset if compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	init_timer(&xhci->comp_mode_recovery_timer);

	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}
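/*
 * Worked example for the bitmask above (illustrative): on a host with
 * num_usb3_ports = 4, the "all ports seen U0" value is
 * (1 << 4) - 1 = 0xf.  Each port that reaches U0 sets its bit in
 * xhci->port_status_u0, and once the mask is complete the recovery timer
 * in compliance_mode_recovery() stops rescheduling itself.
 */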
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	/* Initialize compliance mode recovery data if needed */
	if (compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}
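/*
 * Background note on the interrupt moderation value programmed in
 * xhci_run() below (not extra driver logic): the ER_IRQ_INTERVAL field
 * counts in 250 ns units per the xHCI spec, so writing 160 gives
 * 160 * 250 ns = 40 us between interrupts, i.e. at most roughly 25000
 * interrupts/second per interrupter.
 */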
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	/* Delete the compliance mode recovery timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci))))
		del_timer_sync(&xhci->comp_mode_recovery_timer);

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
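/*
 * Sketch of the command ring register value composed by
 * xhci_set_cmd_ring_deq() above (for reference, not extra driver logic):
 * the bits covered by CMD_RING_RSVD_BITS are preserved read-modify-write,
 * the 64-byte-aligned dequeue pointer DMA address fills the remaining high
 * bits, and the ring cycle state is OR'd into bit 0.
 */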
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoints */
	/* skipped, assuming port suspend has already stopped them */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Delete the Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * always needs to be re-initialized after a system resume: the ports
	 * may suffer the compliance mode issue again, regardless of whether
	 * they entered U0 before the system was suspended.
	 */
	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
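/*
 * Worked examples for the index formula above (illustrative only):
 *
 *	ep 0x81 (ep 1, IN):	index = (1 * 2) + 1 - 1 = 2
 *	ep 0x02 (ep 2, OUT):	index = (2 * 2) + 0 - 1 = 3
 *	ep0 (control):		index = (0 * 2)         = 0
 *
 * The matching input context flag is then 1 << (index + 1), since bit 0 of
 * the add/drop flag words is reserved for the slot context.
 */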
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and virt_dev that don't match\n",
					func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
			true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has stream IDs 1 to %u allocated, but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
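/*
 * Boundary-check example for the stream lookup above (illustrative): with
 * num_streams = 4, stream ring indices 1..3 are valid.  A URB with
 * stream_id 0 is rejected because stream ID 0 is reserved, and a URB with
 * stream_id >= 4 falls through to the out-of-range warning and returns
 * NULL.
 */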
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
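/*
 * Expected call order for the two functions below (a sketch of how the USB
 * core's bandwidth allocation is assumed to drive them, shown only for
 * orientation):
 *
 *	xhci_drop_endpoint()  - once per endpoint going away
 *	xhci_add_endpoint()   - once per endpoint in the new setting
 *	check_bandwidth()     - issue the configure endpoint command,
 *				or reset_bandwidth() to abandon the changes
 */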
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
			cpu_to_le32(EP_STATE_DISABLED)) ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state?
*/ 1792 break; 1793 case COMP_TRB_ERR: 1794 /* the HCD set up something wrong */ 1795 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " 1796 "add flag = 1, " 1797 "and endpoint is not disabled.\n"); 1798 ret = -EINVAL; 1799 break; 1800 case COMP_DEV_ERR: 1801 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint " 1802 "configure command.\n"); 1803 ret = -ENODEV; 1804 break; 1805 case COMP_SUCCESS: 1806 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); 1807 ret = 0; 1808 break; 1809 default: 1810 xhci_err(xhci, "ERROR: unexpected command completion " 1811 "code 0x%x.\n", *cmd_status); 1812 ret = -EINVAL; 1813 break; 1814 } 1815 return ret; 1816 } 1817 1818 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 1819 struct usb_device *udev, u32 *cmd_status) 1820 { 1821 int ret; 1822 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; 1823 1824 switch (*cmd_status) { 1825 case COMP_EINVAL: 1826 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " 1827 "context command.\n"); 1828 ret = -EINVAL; 1829 break; 1830 case COMP_EBADSLT: 1831 dev_warn(&udev->dev, "WARN: slot not enabled for " 1832 "evaluate context command.\n"); 1833 ret = -EINVAL; 1834 break; 1835 case COMP_CTX_STATE: 1836 dev_warn(&udev->dev, "WARN: invalid context state for " 1837 "evaluate context command.\n"); 1838 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); 1839 ret = -EINVAL; 1840 break; 1841 case COMP_DEV_ERR: 1842 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate " 1843 "context command.\n"); 1844 ret = -ENODEV; 1845 break; 1846 case COMP_MEL_ERR: 1847 /* Max Exit Latency too large error */ 1848 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 1849 ret = -EINVAL; 1850 break; 1851 case COMP_SUCCESS: 1852 dev_dbg(&udev->dev, "Successful evaluate context command\n"); 1853 ret = 0; 1854 break; 1855 default: 1856 xhci_err(xhci, "ERROR: unexpected command completion " 1857 "code 0x%x.\n", *cmd_status); 1858 ret = -EINVAL; 1859 break; 1860 } 1861 return ret; 1862 } 1863 1864 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, 1865 struct xhci_container_ctx *in_ctx) 1866 { 1867 struct xhci_input_control_ctx *ctrl_ctx; 1868 u32 valid_add_flags; 1869 u32 valid_drop_flags; 1870 1871 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1872 /* Ignore the slot flag (bit 0), and the default control endpoint flag 1873 * (bit 1). The default control endpoint is added during the Address 1874 * Device command and is never removed until the slot is disabled. 1875 */ 1876 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 1877 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 1878 1879 /* Use hweight32 to count the number of ones in the add flags, or 1880 * number of endpoints added. Don't count endpoints that are changed 1881 * (both added and dropped).
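 *
 * A quick worked example (the values are illustrative): if valid_add_flags
 * is 0x6 (two endpoints added) and valid_drop_flags is 0x2 (one of those
 * two was also dropped, i.e. changed), then
 * hweight32(0x6) - hweight32(0x6 & 0x2) = 2 - 1 = 1 truly new endpoint.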
1882 */ 1883 return hweight32(valid_add_flags) - 1884 hweight32(valid_add_flags & valid_drop_flags); 1885 } 1886 1887 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, 1888 struct xhci_container_ctx *in_ctx) 1889 { 1890 struct xhci_input_control_ctx *ctrl_ctx; 1891 u32 valid_add_flags; 1892 u32 valid_drop_flags; 1893 1894 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1895 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 1896 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 1897 1898 return hweight32(valid_drop_flags) - 1899 hweight32(valid_add_flags & valid_drop_flags); 1900 } 1901 1902 /* 1903 * We need to reserve the new number of endpoints before the configure endpoint 1904 * command completes. We can't subtract the dropped endpoints from the number 1905 * of active endpoints until the command completes because we can oversubscribe 1906 * the host in this case: 1907 * 1908 * - the first configure endpoint command drops more endpoints than it adds 1909 * - a second configure endpoint command that adds more endpoints is queued 1910 * - the first configure endpoint command fails, so the config is unchanged 1911 * - the second command may succeed, even though there aren't enough resources 1912 * 1913 * Must be called with xhci->lock held. 1914 */ 1915 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, 1916 struct xhci_container_ctx *in_ctx) 1917 { 1918 u32 added_eps; 1919 1920 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); 1921 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 1922 xhci_dbg(xhci, "Not enough ep ctxs: " 1923 "%u active, need to add %u, limit is %u.\n", 1924 xhci->num_active_eps, added_eps, 1925 xhci->limit_active_eps); 1926 return -ENOMEM; 1927 } 1928 xhci->num_active_eps += added_eps; 1929 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, 1930 xhci->num_active_eps); 1931 return 0; 1932 } 1933 1934 /* 1935 * The configure endpoint command was failed by the xHC for some other reason, 1936 * so we need to revert the resources that the failed configuration would have used. 1937 * 1938 * Must be called with xhci->lock held. 1939 */ 1940 static void xhci_free_host_resources(struct xhci_hcd *xhci, 1941 struct xhci_container_ctx *in_ctx) 1942 { 1943 u32 num_failed_eps; 1944 1945 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); 1946 xhci->num_active_eps -= num_failed_eps; 1947 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", 1948 num_failed_eps, 1949 xhci->num_active_eps); 1950 } 1951 1952 /* 1953 * Now that the command has completed, clean up the active endpoint count by 1954 * subtracting out the endpoints that were dropped (but not changed). 1955 * 1956 * Must be called with xhci->lock held.
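 *
 * For example (illustrative): a command that dropped two endpoints and
 * changed a third (dropped and re-added) frees only two contexts, since
 * the changed endpoint is still in use:
 * hweight32(drop_flags) - hweight32(add_flags & drop_flags) = 3 - 1 = 2.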
*/ 1958 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, 1959 struct xhci_container_ctx *in_ctx) 1960 { 1961 u32 num_dropped_eps; 1962 1963 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); 1964 xhci->num_active_eps -= num_dropped_eps; 1965 if (num_dropped_eps) 1966 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", 1967 num_dropped_eps, 1968 xhci->num_active_eps); 1969 } 1970 1971 static unsigned int xhci_get_block_size(struct usb_device *udev) 1972 { 1973 switch (udev->speed) { 1974 case USB_SPEED_LOW: 1975 case USB_SPEED_FULL: 1976 return FS_BLOCK; 1977 case USB_SPEED_HIGH: 1978 return HS_BLOCK; 1979 case USB_SPEED_SUPER: 1980 return SS_BLOCK; 1981 case USB_SPEED_UNKNOWN: 1982 case USB_SPEED_WIRELESS: 1983 default: 1984 /* Should never happen */ 1985 return 1; 1986 } 1987 } 1988 1989 static unsigned int 1990 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) 1991 { 1992 if (interval_bw->overhead[LS_OVERHEAD_TYPE]) 1993 return LS_OVERHEAD; 1994 if (interval_bw->overhead[FS_OVERHEAD_TYPE]) 1995 return FS_OVERHEAD; 1996 return HS_OVERHEAD; 1997 } 1998 1999 /* If we are changing a LS/FS device under a HS hub, 2000 * make sure (if we are activating a new TT) that the HS bus has enough 2001 * bandwidth for this new TT. 2002 */ 2003 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, 2004 struct xhci_virt_device *virt_dev, 2005 int old_active_eps) 2006 { 2007 struct xhci_interval_bw_table *bw_table; 2008 struct xhci_tt_bw_info *tt_info; 2009 2010 /* Find the bandwidth table for the root port this TT is attached to. */ 2011 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; 2012 tt_info = virt_dev->tt_info; 2013 /* If this TT already had active endpoints, the bandwidth for this TT 2014 * has already been added. Removing all periodic endpoints (and thus 2015 * making the TT inactive) will only decrease the bandwidth used. 2016 */ 2017 if (old_active_eps) 2018 return 0; 2019 if (old_active_eps == 0 && tt_info->active_eps != 0) { 2020 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) 2021 return -ENOMEM; 2022 return 0; 2023 } 2024 /* Not sure why we would have no new active endpoints... 2025 * 2026 * Maybe because of an Evaluate Context change for a hub update or a 2027 * control endpoint 0 max packet size change? 2028 * FIXME: skip the bandwidth calculation in that case. 2029 */ 2030 return 0; 2031 } 2032 2033 static int xhci_check_ss_bw(struct xhci_hcd *xhci, 2034 struct xhci_virt_device *virt_dev) 2035 { 2036 unsigned int bw_reserved; 2037 2038 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); 2039 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) 2040 return -ENOMEM; 2041 2042 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); 2043 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) 2044 return -ENOMEM; 2045 2046 return 0; 2047 } 2048 2049 /* 2050 * This algorithm is a very conservative estimate of the worst-case scheduling 2051 * scenario for any one interval. The hardware dynamically schedules the 2052 * packets, so we can't tell which microframe could be the limiting factor in 2053 * the bandwidth scheduling. This only takes into account periodic endpoints. 2054 * 2055 * Obviously, we can't solve an NP-complete problem to find the minimum worst 2056 * case scenario. Instead, we come up with an estimate that is no less than 2057 * the worst case bandwidth used for any one microframe, but may be an 2058 * over-estimate.
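 *
 * As a rough illustration of the carry-over described below (the numbers
 * are made up): say five packets have interval i = 1, for which
 * xhci_check_bw_table() below considers 1 << (i + 1) = 4 scheduling
 * opportunities. One packet is placed in each opportunity, so one
 * packet's worth of (overhead + largest max packet size) is added to the
 * per-microframe estimate, and the 5 % 4 = 1 leftover packet is carried
 * into interval 2, where it counts as two packets, because each
 * interval-2 window covers two interval-1 windows.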
* 2060 * We walk the requirements for each endpoint by interval, starting with the 2061 * smallest interval, and place packets in the schedule where there is only one 2062 * possible way to schedule packets for that interval. In order to simplify 2063 * this algorithm, we record the largest max packet size for each interval, and 2064 * assume all packets will be that size. 2065 * 2066 * For interval 0, we obviously must schedule all packets in every microframe. 2067 * The bandwidth for interval 0 is just the amount of data to be transmitted 2068 * (the sum of all max ESIT payload sizes, plus any overhead per packet times 2069 * the number of packets). 2070 * 2071 * For interval 1, we have two possible microframes to schedule those packets 2072 * in. For this algorithm, if we can schedule the same number of packets for 2073 * each possible scheduling opportunity (each microframe), we will do so. The 2074 * remaining number of packets will be saved to be transmitted in the gaps in 2075 * the next interval's scheduling sequence. 2076 * 2077 * As we move those remaining packets to be scheduled with interval 2 packets, 2078 * we have to double the number of remaining packets to transmit. This is 2079 * because the intervals are actually powers of 2, and we would be transmitting 2080 * the previous interval's packets twice in this interval. We also have to be 2081 * sure that when we look at the largest max packet size for this interval, we 2082 * also look at the largest max packet size for the remaining packets and take 2083 * the greater of the two. 2084 * 2085 * The algorithm continues to evenly distribute packets in each scheduling 2086 * opportunity, and push the remaining packets out, until we get to the last 2087 * interval. Then those packets and their associated overhead are just added 2088 * to the bandwidth used. 2089 */ 2090 static int xhci_check_bw_table(struct xhci_hcd *xhci, 2091 struct xhci_virt_device *virt_dev, 2092 int old_active_eps) 2093 { 2094 unsigned int bw_reserved; 2095 unsigned int max_bandwidth; 2096 unsigned int bw_used; 2097 unsigned int block_size; 2098 struct xhci_interval_bw_table *bw_table; 2099 unsigned int packet_size = 0; 2100 unsigned int overhead = 0; 2101 unsigned int packets_transmitted = 0; 2102 unsigned int packets_remaining = 0; 2103 unsigned int i; 2104 2105 if (virt_dev->udev->speed == USB_SPEED_SUPER) 2106 return xhci_check_ss_bw(xhci, virt_dev); 2107 2108 if (virt_dev->udev->speed == USB_SPEED_HIGH) { 2109 max_bandwidth = HS_BW_LIMIT; 2110 /* Convert percent of bus BW reserved to blocks reserved */ 2111 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); 2112 } else { 2113 max_bandwidth = FS_BW_LIMIT; 2114 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); 2115 } 2116 2117 bw_table = virt_dev->bw_table; 2118 /* We need to translate the max packet size and max ESIT payloads into 2119 * the units the hardware uses. 2120 */ 2121 block_size = xhci_get_block_size(virt_dev->udev); 2122 2123 /* If we are manipulating a LS/FS device under a HS hub, double check 2124 * that the HS bus has enough bandwidth if we are activating a new TT.
2125 */ 2126 if (virt_dev->tt_info) { 2127 xhci_dbg(xhci, "Recalculating BW for rootport %u\n", 2128 virt_dev->real_port); 2129 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2130 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2131 "newly activated TT.\n"); 2132 return -ENOMEM; 2133 } 2134 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", 2135 virt_dev->tt_info->slot_id, 2136 virt_dev->tt_info->ttport); 2137 } else { 2138 xhci_dbg(xhci, "Recalculating BW for rootport %u\n", 2139 virt_dev->real_port); 2140 } 2141 2142 /* Add in how much bandwidth will be used for interval zero, or the 2143 * rounded max ESIT payload + number of packets * largest overhead. 2144 */ 2145 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 2146 bw_table->interval_bw[0].num_packets * 2147 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 2148 2149 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 2150 unsigned int bw_added; 2151 unsigned int largest_mps; 2152 unsigned int interval_overhead; 2153 2154 /* 2155 * How many packets could we transmit in this interval? 2156 * If packets didn't fit in the previous interval, we will need 2157 * to transmit that many packets twice within this interval. 2158 */ 2159 packets_remaining = 2 * packets_remaining + 2160 bw_table->interval_bw[i].num_packets; 2161 2162 /* Find the largest max packet size of this or the previous 2163 * interval. 2164 */ 2165 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2166 largest_mps = 0; 2167 else { 2168 struct xhci_virt_ep *virt_ep; 2169 struct list_head *ep_entry; 2170 2171 ep_entry = bw_table->interval_bw[i].endpoints.next; 2172 virt_ep = list_entry(ep_entry, 2173 struct xhci_virt_ep, bw_endpoint_list); 2174 /* Convert to blocks, rounding up */ 2175 largest_mps = DIV_ROUND_UP( 2176 virt_ep->bw_info.max_packet_size, 2177 block_size); 2178 } 2179 if (largest_mps > packet_size) 2180 packet_size = largest_mps; 2181 2182 /* Use the larger overhead of this or the previous interval. */ 2183 interval_overhead = xhci_get_largest_overhead( 2184 &bw_table->interval_bw[i]); 2185 if (interval_overhead > overhead) 2186 overhead = interval_overhead; 2187 2188 /* How many packets can we evenly distribute across 2189 * (1 << (i + 1)) possible scheduling opportunities? 2190 */ 2191 packets_transmitted = packets_remaining >> (i + 1); 2192 2193 /* Add in the bandwidth used for those scheduled packets */ 2194 bw_added = packets_transmitted * (overhead + packet_size); 2195 2196 /* How many packets do we have remaining to transmit? */ 2197 packets_remaining = packets_remaining % (1 << (i + 1)); 2198 2199 /* What largest max packet size should those packets have? */ 2200 /* If we've transmitted all packets, don't carry over the 2201 * largest packet size. 2202 */ 2203 if (packets_remaining == 0) { 2204 packet_size = 0; 2205 overhead = 0; 2206 } else if (packets_transmitted > 0) { 2207 /* Otherwise if we do have remaining packets, and we've 2208 * scheduled some packets in this interval, take the 2209 * largest max packet size from endpoints with this 2210 * interval. 2211 */ 2212 packet_size = largest_mps; 2213 overhead = interval_overhead; 2214 } 2215 /* Otherwise carry over packet_size and overhead from the last 2216 * time we had a remainder. 2217 */ 2218 bw_used += bw_added; 2219 if (bw_used > max_bandwidth) { 2220 xhci_warn(xhci, "Not enough bandwidth. 
" 2221 "Proposed: %u, Max: %u\n", 2222 bw_used, max_bandwidth); 2223 return -ENOMEM; 2224 } 2225 } 2226 /* 2227 * Ok, we know we have some packets left over after even-handedly 2228 * scheduling interval 15. We don't know which microframes they will 2229 * fit into, so we over-schedule and say they will be scheduled every 2230 * microframe. 2231 */ 2232 if (packets_remaining > 0) 2233 bw_used += overhead + packet_size; 2234 2235 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2236 unsigned int port_index = virt_dev->real_port - 1; 2237 2238 /* OK, we're manipulating a HS device attached to a 2239 * root port bandwidth domain. Include the number of active TTs 2240 * in the bandwidth used. 2241 */ 2242 bw_used += TT_HS_OVERHEAD * 2243 xhci->rh_bw[port_index].num_active_tts; 2244 } 2245 2246 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2247 "Available: %u " "percent\n", 2248 bw_used, max_bandwidth, bw_reserved, 2249 (max_bandwidth - bw_used - bw_reserved) * 100 / 2250 max_bandwidth); 2251 2252 bw_used += bw_reserved; 2253 if (bw_used > max_bandwidth) { 2254 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2255 bw_used, max_bandwidth); 2256 return -ENOMEM; 2257 } 2258 2259 bw_table->bw_used = bw_used; 2260 return 0; 2261 } 2262 2263 static bool xhci_is_async_ep(unsigned int ep_type) 2264 { 2265 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2266 ep_type != ISOC_IN_EP && 2267 ep_type != INT_IN_EP); 2268 } 2269 2270 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2271 { 2272 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2273 } 2274 2275 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2276 { 2277 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2278 2279 if (ep_bw->ep_interval == 0) 2280 return SS_OVERHEAD_BURST + 2281 (ep_bw->mult * ep_bw->num_packets * 2282 (SS_OVERHEAD + mps)); 2283 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2284 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2285 1 << ep_bw->ep_interval); 2286 2287 } 2288 2289 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2290 struct xhci_bw_info *ep_bw, 2291 struct xhci_interval_bw_table *bw_table, 2292 struct usb_device *udev, 2293 struct xhci_virt_ep *virt_ep, 2294 struct xhci_tt_bw_info *tt_info) 2295 { 2296 struct xhci_interval_bw *interval_bw; 2297 int normalized_interval; 2298 2299 if (xhci_is_async_ep(ep_bw->type)) 2300 return; 2301 2302 if (udev->speed == USB_SPEED_SUPER) { 2303 if (xhci_is_sync_in_ep(ep_bw->type)) 2304 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2305 xhci_get_ss_bw_consumed(ep_bw); 2306 else 2307 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2308 xhci_get_ss_bw_consumed(ep_bw); 2309 return; 2310 } 2311 2312 /* SuperSpeed endpoints never get added to intervals in the table, so 2313 * this check is only valid for HS/FS/LS devices. 2314 */ 2315 if (list_empty(&virt_ep->bw_endpoint_list)) 2316 return; 2317 /* For LS/FS devices, we need to translate the interval expressed in 2318 * microframes to frames. 
2319 */ 2320 if (udev->speed == USB_SPEED_HIGH) 2321 normalized_interval = ep_bw->ep_interval; 2322 else 2323 normalized_interval = ep_bw->ep_interval - 3; 2324 2325 if (normalized_interval == 0) 2326 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; 2327 interval_bw = &bw_table->interval_bw[normalized_interval]; 2328 interval_bw->num_packets -= ep_bw->num_packets; 2329 switch (udev->speed) { 2330 case USB_SPEED_LOW: 2331 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; 2332 break; 2333 case USB_SPEED_FULL: 2334 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; 2335 break; 2336 case USB_SPEED_HIGH: 2337 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; 2338 break; 2339 case USB_SPEED_SUPER: 2340 case USB_SPEED_UNKNOWN: 2341 case USB_SPEED_WIRELESS: 2342 /* Should never happen because only LS/FS/HS endpoints will get 2343 * added to the endpoint list. 2344 */ 2345 return; 2346 } 2347 if (tt_info) 2348 tt_info->active_eps -= 1; 2349 list_del_init(&virt_ep->bw_endpoint_list); 2350 } 2351 2352 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2353 struct xhci_bw_info *ep_bw, 2354 struct xhci_interval_bw_table *bw_table, 2355 struct usb_device *udev, 2356 struct xhci_virt_ep *virt_ep, 2357 struct xhci_tt_bw_info *tt_info) 2358 { 2359 struct xhci_interval_bw *interval_bw; 2360 struct xhci_virt_ep *smaller_ep; 2361 int normalized_interval; 2362 2363 if (xhci_is_async_ep(ep_bw->type)) 2364 return; 2365 2366 if (udev->speed == USB_SPEED_SUPER) { 2367 if (xhci_is_sync_in_ep(ep_bw->type)) 2368 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2369 xhci_get_ss_bw_consumed(ep_bw); 2370 else 2371 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2372 xhci_get_ss_bw_consumed(ep_bw); 2373 return; 2374 } 2375 2376 /* For LS/FS devices, we need to translate the interval expressed in 2377 * microframes to frames. 2378 */ 2379 if (udev->speed == USB_SPEED_HIGH) 2380 normalized_interval = ep_bw->ep_interval; 2381 else 2382 normalized_interval = ep_bw->ep_interval - 3; 2383 2384 if (normalized_interval == 0) 2385 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2386 interval_bw = &bw_table->interval_bw[normalized_interval]; 2387 interval_bw->num_packets += ep_bw->num_packets; 2388 switch (udev->speed) { 2389 case USB_SPEED_LOW: 2390 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2391 break; 2392 case USB_SPEED_FULL: 2393 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2394 break; 2395 case USB_SPEED_HIGH: 2396 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2397 break; 2398 case USB_SPEED_SUPER: 2399 case USB_SPEED_UNKNOWN: 2400 case USB_SPEED_WIRELESS: 2401 /* Should never happen because only LS/FS/HS endpoints will get 2402 * added to the endpoint list. 2403 */ 2404 return; 2405 } 2406 2407 if (tt_info) 2408 tt_info->active_eps += 1; 2409 /* Insert the endpoint into the list, largest max packet size first. */ 2410 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2411 bw_endpoint_list) { 2412 if (ep_bw->max_packet_size >= 2413 smaller_ep->bw_info.max_packet_size) { 2414 /* Add the new ep before the smaller endpoint */ 2415 list_add_tail(&virt_ep->bw_endpoint_list, 2416 &smaller_ep->bw_endpoint_list); 2417 return; 2418 } 2419 } 2420 /* Add the new endpoint at the end of the list. 
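 * Keeping the list sorted by max packet size lets xhci_check_bw_table()
 * read the largest max packet size for an interval straight from the
 * head of the list.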
*/ 2421 list_add_tail(&virt_ep->bw_endpoint_list, 2422 &interval_bw->endpoints); 2423 } 2424 2425 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2426 struct xhci_virt_device *virt_dev, 2427 int old_active_eps) 2428 { 2429 struct xhci_root_port_bw_info *rh_bw_info; 2430 if (!virt_dev->tt_info) 2431 return; 2432 2433 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2434 if (old_active_eps == 0 && 2435 virt_dev->tt_info->active_eps != 0) { 2436 rh_bw_info->num_active_tts += 1; 2437 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2438 } else if (old_active_eps != 0 && 2439 virt_dev->tt_info->active_eps == 0) { 2440 rh_bw_info->num_active_tts -= 1; 2441 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2442 } 2443 } 2444 2445 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2446 struct xhci_virt_device *virt_dev, 2447 struct xhci_container_ctx *in_ctx) 2448 { 2449 struct xhci_bw_info ep_bw_info[31]; 2450 int i; 2451 struct xhci_input_control_ctx *ctrl_ctx; 2452 int old_active_eps = 0; 2453 2454 if (virt_dev->tt_info) 2455 old_active_eps = virt_dev->tt_info->active_eps; 2456 2457 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2458 2459 for (i = 0; i < 31; i++) { 2460 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2461 continue; 2462 2463 /* Make a copy of the BW info in case we need to revert this */ 2464 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2465 sizeof(ep_bw_info[i])); 2466 /* Drop the endpoint from the interval table if the endpoint is 2467 * being dropped or changed. 2468 */ 2469 if (EP_IS_DROPPED(ctrl_ctx, i)) 2470 xhci_drop_ep_from_interval_table(xhci, 2471 &virt_dev->eps[i].bw_info, 2472 virt_dev->bw_table, 2473 virt_dev->udev, 2474 &virt_dev->eps[i], 2475 virt_dev->tt_info); 2476 } 2477 /* Overwrite the information stored in the endpoints' bw_info */ 2478 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2479 for (i = 0; i < 31; i++) { 2480 /* Add any changed or added endpoints to the interval table */ 2481 if (EP_IS_ADDED(ctrl_ctx, i)) 2482 xhci_add_ep_to_interval_table(xhci, 2483 &virt_dev->eps[i].bw_info, 2484 virt_dev->bw_table, 2485 virt_dev->udev, 2486 &virt_dev->eps[i], 2487 virt_dev->tt_info); 2488 } 2489 2490 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2491 /* Ok, this fits in the bandwidth we have. 2492 * Update the number of active TTs. 2493 */ 2494 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2495 return 0; 2496 } 2497 2498 /* We don't have enough bandwidth for this, revert the stored info. */ 2499 for (i = 0; i < 31; i++) { 2500 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2501 continue; 2502 2503 /* Drop the new copies of any added or changed endpoints from 2504 * the interval table. 2505 */ 2506 if (EP_IS_ADDED(ctrl_ctx, i)) { 2507 xhci_drop_ep_from_interval_table(xhci, 2508 &virt_dev->eps[i].bw_info, 2509 virt_dev->bw_table, 2510 virt_dev->udev, 2511 &virt_dev->eps[i], 2512 virt_dev->tt_info); 2513 } 2514 /* Revert the endpoint back to its old information */ 2515 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2516 sizeof(ep_bw_info[i])); 2517 /* Add any changed or dropped endpoints back into the table */ 2518 if (EP_IS_DROPPED(ctrl_ctx, i)) 2519 xhci_add_ep_to_interval_table(xhci, 2520 &virt_dev->eps[i].bw_info, 2521 virt_dev->bw_table, 2522 virt_dev->udev, 2523 &virt_dev->eps[i], 2524 virt_dev->tt_info); 2525 } 2526 return -ENOMEM; 2527 } 2528 2529 2530 /* Issue a configure endpoint command or evaluate context command 2531 * and wait for it to finish. 
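 * A Configure Endpoint command changes which endpoint contexts are valid
 * (xHCI spec section 4.6.6); an Evaluate Context command asks the xHC to
 * re-read selected fields, such as the slot context or the endpoint 0
 * max packet size (section 4.6.7).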
2532 */ 2533 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2534 struct usb_device *udev, 2535 struct xhci_command *command, 2536 bool ctx_change, bool must_succeed) 2537 { 2538 int ret; 2539 int timeleft; 2540 unsigned long flags; 2541 struct xhci_container_ctx *in_ctx; 2542 struct completion *cmd_completion; 2543 u32 *cmd_status; 2544 struct xhci_virt_device *virt_dev; 2545 union xhci_trb *cmd_trb; 2546 2547 spin_lock_irqsave(&xhci->lock, flags); 2548 virt_dev = xhci->devs[udev->slot_id]; 2549 2550 if (command) 2551 in_ctx = command->in_ctx; 2552 else 2553 in_ctx = virt_dev->in_ctx; 2554 2555 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2556 xhci_reserve_host_resources(xhci, in_ctx)) { 2557 spin_unlock_irqrestore(&xhci->lock, flags); 2558 xhci_warn(xhci, "Not enough host resources, " 2559 "active endpoint contexts = %u\n", 2560 xhci->num_active_eps); 2561 return -ENOMEM; 2562 } 2563 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2564 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { 2565 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2566 xhci_free_host_resources(xhci, in_ctx); 2567 spin_unlock_irqrestore(&xhci->lock, flags); 2568 xhci_warn(xhci, "Not enough bandwidth\n"); 2569 return -ENOMEM; 2570 } 2571 2572 if (command) { 2573 cmd_completion = command->completion; 2574 cmd_status = &command->status; 2575 command->command_trb = xhci->cmd_ring->enqueue; 2576 2577 /* Enqueue pointer can be left pointing to the link TRB, 2578 * we must handle that 2579 */ 2580 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) 2581 command->command_trb = 2582 xhci->cmd_ring->enq_seg->next->trbs; 2583 2584 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 2585 } else { 2586 cmd_completion = &virt_dev->cmd_completion; 2587 cmd_status = &virt_dev->cmd_status; 2588 } 2589 init_completion(cmd_completion); 2590 2591 cmd_trb = xhci->cmd_ring->dequeue; 2592 if (!ctx_change) 2593 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 2594 udev->slot_id, must_succeed); 2595 else 2596 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, 2597 udev->slot_id, must_succeed); 2598 if (ret < 0) { 2599 if (command) 2600 list_del(&command->cmd_list); 2601 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2602 xhci_free_host_resources(xhci, in_ctx); 2603 spin_unlock_irqrestore(&xhci->lock, flags); 2604 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 2605 return -ENOMEM; 2606 } 2607 xhci_ring_cmd_db(xhci); 2608 spin_unlock_irqrestore(&xhci->lock, flags); 2609 2610 /* Wait for the configure endpoint command to complete */ 2611 timeleft = wait_for_completion_interruptible_timeout( 2612 cmd_completion, 2613 XHCI_CMD_DEFAULT_TIMEOUT); 2614 if (timeleft <= 0) { 2615 xhci_warn(xhci, "%s while waiting for %s command\n", 2616 timeleft == 0 ? "Timeout" : "Signal", 2617 ctx_change == 0 ? 2618 "configure endpoint" : 2619 "evaluate context"); 2620 /* cancel the configure endpoint command */ 2621 ret = xhci_cancel_cmd(xhci, command, cmd_trb); 2622 if (ret < 0) 2623 return ret; 2624 return -ETIME; 2625 } 2626 2627 if (!ctx_change) 2628 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); 2629 else 2630 ret = xhci_evaluate_context_result(xhci, udev, cmd_status); 2631 2632 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2633 spin_lock_irqsave(&xhci->lock, flags); 2634 /* If the command failed, remove the reserved resources. 2635 * Otherwise, clean up the estimate to include dropped eps. 
*/ 2637 if (ret) 2638 xhci_free_host_resources(xhci, in_ctx); 2639 else 2640 xhci_finish_resource_reservation(xhci, in_ctx); 2641 spin_unlock_irqrestore(&xhci->lock, flags); 2642 } 2643 return ret; 2644 } 2645 2646 /* Called after one or more calls to xhci_add_endpoint() or 2647 * xhci_drop_endpoint(). If this call fails, the USB core is expected 2648 * to call xhci_reset_bandwidth(). 2649 * 2650 * Since we are in the middle of changing either configuration or 2651 * installing a new alt setting, the USB core won't allow URBs to be 2652 * enqueued for any endpoint on the old config or interface. Nothing 2653 * else should be touching the xhci->devs[slot_id] structure, so we 2654 * don't need to take the xhci->lock for manipulating that. 2655 */ 2656 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2657 { 2658 int i; 2659 int ret = 0; 2660 struct xhci_hcd *xhci; 2661 struct xhci_virt_device *virt_dev; 2662 struct xhci_input_control_ctx *ctrl_ctx; 2663 struct xhci_slot_ctx *slot_ctx; 2664 2665 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2666 if (ret <= 0) 2667 return ret; 2668 xhci = hcd_to_xhci(hcd); 2669 if (xhci->xhc_state & XHCI_STATE_DYING) 2670 return -ENODEV; 2671 2672 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2673 virt_dev = xhci->devs[udev->slot_id]; 2674 2675 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 2676 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2677 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2678 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 2679 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 2680 2681 /* Don't issue the command if there are no endpoints to update. */ 2682 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 2683 ctrl_ctx->drop_flags == 0) 2684 return 0; 2685 2686 xhci_dbg(xhci, "New Input Control Context:\n"); 2687 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2688 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2689 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2690 2691 ret = xhci_configure_endpoint(xhci, udev, NULL, 2692 false, false); 2693 if (ret) { 2694 /* Caller should call reset_bandwidth() */ 2695 return ret; 2696 } 2697 2698 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 2699 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2700 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2701 2702 /* Free any rings that were dropped, but not changed. */ 2703 for (i = 1; i < 31; ++i) { 2704 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 2705 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) 2706 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2707 } 2708 xhci_zero_in_ctx(xhci, virt_dev); 2709 /* 2710 * Install any rings for completely new endpoints or changed endpoints, 2711 * and free or cache any old rings from changed endpoints. 2712 */ 2713 for (i = 1; i < 31; ++i) { 2714 if (!virt_dev->eps[i].new_ring) 2715 continue; 2716 /* Only cache or free the old ring if it exists. 2717 * It may not if this is the first add of an endpoint.
2718 */ 2719 if (virt_dev->eps[i].ring) { 2720 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2721 } 2722 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 2723 virt_dev->eps[i].new_ring = NULL; 2724 } 2725 2726 return ret; 2727 } 2728 2729 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2730 { 2731 struct xhci_hcd *xhci; 2732 struct xhci_virt_device *virt_dev; 2733 int i, ret; 2734 2735 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2736 if (ret <= 0) 2737 return; 2738 xhci = hcd_to_xhci(hcd); 2739 2740 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2741 virt_dev = xhci->devs[udev->slot_id]; 2742 /* Free any rings allocated for added endpoints */ 2743 for (i = 0; i < 31; ++i) { 2744 if (virt_dev->eps[i].new_ring) { 2745 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 2746 virt_dev->eps[i].new_ring = NULL; 2747 } 2748 } 2749 xhci_zero_in_ctx(xhci, virt_dev); 2750 } 2751 2752 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 2753 struct xhci_container_ctx *in_ctx, 2754 struct xhci_container_ctx *out_ctx, 2755 u32 add_flags, u32 drop_flags) 2756 { 2757 struct xhci_input_control_ctx *ctrl_ctx; 2758 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2759 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 2760 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 2761 xhci_slot_copy(xhci, in_ctx, out_ctx); 2762 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2763 2764 xhci_dbg(xhci, "Input Context:\n"); 2765 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 2766 } 2767 2768 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 2769 unsigned int slot_id, unsigned int ep_index, 2770 struct xhci_dequeue_state *deq_state) 2771 { 2772 struct xhci_container_ctx *in_ctx; 2773 struct xhci_ep_ctx *ep_ctx; 2774 u32 added_ctxs; 2775 dma_addr_t addr; 2776 2777 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 2778 xhci->devs[slot_id]->out_ctx, ep_index); 2779 in_ctx = xhci->devs[slot_id]->in_ctx; 2780 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 2781 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 2782 deq_state->new_deq_ptr); 2783 if (addr == 0) { 2784 xhci_warn(xhci, "WARN Cannot submit config ep after " 2785 "reset ep command\n"); 2786 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 2787 deq_state->new_deq_seg, 2788 deq_state->new_deq_ptr); 2789 return; 2790 } 2791 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 2792 2793 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 2794 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 2795 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); 2796 } 2797 2798 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 2799 struct usb_device *udev, unsigned int ep_index) 2800 { 2801 struct xhci_dequeue_state deq_state; 2802 struct xhci_virt_ep *ep; 2803 2804 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); 2805 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2806 /* We need to move the HW's dequeue pointer past this TD, 2807 * or it will attempt to resend it on the next doorbell ring. 2808 */ 2809 xhci_find_new_dequeue_state(xhci, udev->slot_id, 2810 ep_index, ep->stopped_stream, ep->stopped_td, 2811 &deq_state); 2812 2813 /* HW with the reset endpoint quirk will use the saved dequeue state to 2814 * issue a configure endpoint command later. 
*/ 2816 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 2817 xhci_dbg(xhci, "Queueing new dequeue state\n"); 2818 xhci_queue_new_dequeue_state(xhci, udev->slot_id, 2819 ep_index, ep->stopped_stream, &deq_state); 2820 } else { 2821 /* Better hope no one uses the input context between now and the 2822 * reset endpoint completion! 2823 * XXX: No idea how this hardware will react when stream rings 2824 * are enabled. 2825 */ 2826 xhci_dbg(xhci, "Setting up input context for " 2827 "configure endpoint command\n"); 2828 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, 2829 ep_index, &deq_state); 2830 } 2831 } 2832 2833 /* Deal with stalled endpoints. The core should have sent the control message 2834 * to clear the halt condition. However, we need to make the xHCI hardware 2835 * reset its sequence number, since a device will expect a sequence number of 2836 * zero after the halt condition is cleared. 2837 * Context: in_interrupt 2838 */ 2839 void xhci_endpoint_reset(struct usb_hcd *hcd, 2840 struct usb_host_endpoint *ep) 2841 { 2842 struct xhci_hcd *xhci; 2843 struct usb_device *udev; 2844 unsigned int ep_index; 2845 unsigned long flags; 2846 int ret; 2847 struct xhci_virt_ep *virt_ep; 2848 2849 xhci = hcd_to_xhci(hcd); 2850 udev = (struct usb_device *) ep->hcpriv; 2851 /* Called with a root hub endpoint (or an endpoint that wasn't added 2852 * with xhci_add_endpoint()). 2853 */ 2854 if (!ep->hcpriv) 2855 return; 2856 ep_index = xhci_get_endpoint_index(&ep->desc); 2857 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2858 if (!virt_ep->stopped_td) { 2859 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", 2860 ep->desc.bEndpointAddress); 2861 return; 2862 } 2863 if (usb_endpoint_xfer_control(&ep->desc)) { 2864 xhci_dbg(xhci, "Control endpoint stall already handled.\n"); 2865 return; 2866 } 2867 2868 xhci_dbg(xhci, "Queueing reset endpoint command\n"); 2869 spin_lock_irqsave(&xhci->lock, flags); 2870 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); 2871 /* 2872 * Can't change the ring dequeue pointer until it's transitioned to the 2873 * stopped state, which is only upon a successful reset endpoint 2874 * command. Better hope that last command worked!
*/ 2876 if (!ret) { 2877 xhci_cleanup_stalled_ring(xhci, udev, ep_index); 2878 kfree(virt_ep->stopped_td); 2879 xhci_ring_cmd_db(xhci); 2880 } 2881 virt_ep->stopped_td = NULL; 2882 virt_ep->stopped_trb = NULL; 2883 virt_ep->stopped_stream = 0; 2884 spin_unlock_irqrestore(&xhci->lock, flags); 2885 2886 if (ret) 2887 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2888 } 2889 2890 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2891 struct usb_device *udev, struct usb_host_endpoint *ep, 2892 unsigned int slot_id) 2893 { 2894 int ret; 2895 unsigned int ep_index; 2896 unsigned int ep_state; 2897 2898 if (!ep) 2899 return -EINVAL; 2900 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 2901 if (ret <= 0) 2902 return -EINVAL; 2903 if (ep->ss_ep_comp.bmAttributes == 0) { 2904 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" 2905 " descriptor for ep 0x%x does not support streams\n", 2906 ep->desc.bEndpointAddress); 2907 return -EINVAL; 2908 } 2909 2910 ep_index = xhci_get_endpoint_index(&ep->desc); 2911 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 2912 if (ep_state & EP_HAS_STREAMS || 2913 ep_state & EP_GETTING_STREAMS) { 2914 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " 2915 "already has streams set up.\n", 2916 ep->desc.bEndpointAddress); 2917 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " 2918 "dynamic stream context array reallocation.\n"); 2919 return -EINVAL; 2920 } 2921 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { 2922 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " 2923 "endpoint 0x%x; URBs are pending.\n", 2924 ep->desc.bEndpointAddress); 2925 return -EINVAL; 2926 } 2927 return 0; 2928 } 2929 2930 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, 2931 unsigned int *num_streams, unsigned int *num_stream_ctxs) 2932 { 2933 unsigned int max_streams; 2934 2935 /* The stream context array size must be a power of two */ 2936 *num_stream_ctxs = roundup_pow_of_two(*num_streams); 2937 /* 2938 * Find out how many primary stream array entries the host controller 2939 * supports. Later we may use secondary stream arrays (similar to 2nd 2940 * level page entries), but that's an optional feature for xHCI host 2941 * controllers. xHCs must support at least 4 stream IDs. 2942 */ 2943 max_streams = HCC_MAX_PSA(xhci->hcc_params); 2944 if (*num_stream_ctxs > max_streams) { 2945 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", 2946 max_streams); 2947 *num_stream_ctxs = max_streams; 2948 *num_streams = max_streams; 2949 } 2950 } 2951 2952 /* Returns an error code if one of the endpoints already has streams. 2953 * This does not change any data structures, it only checks and gathers 2954 * information.
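 *
 * It may also lower *num_streams. For example (illustrative numbers): if
 * a driver asked for 16 streams (17 counting stream 0) but one endpoint's
 * companion descriptor only advertises 8, *num_streams is lowered to 9,
 * i.e. 8 usable stream IDs plus the reserved stream 0.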
*/ 2956 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, 2957 struct usb_device *udev, 2958 struct usb_host_endpoint **eps, unsigned int num_eps, 2959 unsigned int *num_streams, u32 *changed_ep_bitmask) 2960 { 2961 unsigned int max_streams; 2962 unsigned int endpoint_flag; 2963 int i; 2964 int ret; 2965 2966 for (i = 0; i < num_eps; i++) { 2967 ret = xhci_check_streams_endpoint(xhci, udev, 2968 eps[i], udev->slot_id); 2969 if (ret < 0) 2970 return ret; 2971 2972 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); 2973 if (max_streams < (*num_streams - 1)) { 2974 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", 2975 eps[i]->desc.bEndpointAddress, 2976 max_streams); 2977 *num_streams = max_streams+1; 2978 } 2979 2980 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); 2981 if (*changed_ep_bitmask & endpoint_flag) 2982 return -EINVAL; 2983 *changed_ep_bitmask |= endpoint_flag; 2984 } 2985 return 0; 2986 } 2987 2988 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, 2989 struct usb_device *udev, 2990 struct usb_host_endpoint **eps, unsigned int num_eps) 2991 { 2992 u32 changed_ep_bitmask = 0; 2993 unsigned int slot_id; 2994 unsigned int ep_index; 2995 unsigned int ep_state; 2996 int i; 2997 2998 slot_id = udev->slot_id; 2999 if (!xhci->devs[slot_id]) 3000 return 0; 3001 3002 for (i = 0; i < num_eps; i++) { 3003 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3004 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3005 /* Are streams already being freed for the endpoint? */ 3006 if (ep_state & EP_GETTING_NO_STREAMS) { 3007 xhci_warn(xhci, "WARN Can't disable streams for " 3008 "endpoint 0x%x, " 3009 "streams are being disabled already.\n", 3010 eps[i]->desc.bEndpointAddress); 3011 return 0; 3012 } 3013 /* Are there actually any streams to free? */ 3014 if (!(ep_state & EP_HAS_STREAMS) && 3015 !(ep_state & EP_GETTING_STREAMS)) { 3016 xhci_warn(xhci, "WARN Can't disable streams for " 3017 "endpoint 0x%x, " 3018 "streams are already disabled!\n", 3019 eps[i]->desc.bEndpointAddress); 3020 xhci_warn(xhci, "WARN xhci_free_streams() called " 3021 "with non-streams endpoint\n"); 3022 return 0; 3023 } 3024 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); 3025 } 3026 return changed_ep_bitmask; 3027 } 3028 3029 /* 3030 * The USB device drivers use this function (through the HCD interface in USB 3031 * core) to prepare a set of bulk endpoints to use streams. Streams are used to 3032 * coordinate mass storage command queueing across multiple endpoints (basically 3033 * a stream ID == a task ID). 3034 * 3035 * Setting up streams involves allocating the same size stream context array 3036 * for each endpoint and issuing a configure endpoint command for all endpoints. 3037 * 3038 * Don't allow the call to succeed if one endpoint only supports one stream 3039 * (which means it doesn't support streams at all). 3040 * 3041 * Drivers may get fewer stream IDs than they asked for, if the host controller 3042 * hardware or endpoints claim they can't support the number of requested 3043 * stream IDs.
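 *
 * A minimal sketch of the caller side (assuming the usb_alloc_streams()
 * wrapper in the USB core, which reaches this function through
 * hcd->driver->alloc_streams; the return-value convention is the one
 * described above):
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (num < 0)
 *		return num;	(fall back to ordinary bulk transfers)
 *	num is then the count of usable stream IDs; stream 0 stays reserved.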
3044 */ 3045 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 3046 struct usb_host_endpoint **eps, unsigned int num_eps, 3047 unsigned int num_streams, gfp_t mem_flags) 3048 { 3049 int i, ret; 3050 struct xhci_hcd *xhci; 3051 struct xhci_virt_device *vdev; 3052 struct xhci_command *config_cmd; 3053 unsigned int ep_index; 3054 unsigned int num_stream_ctxs; 3055 unsigned long flags; 3056 u32 changed_ep_bitmask = 0; 3057 3058 if (!eps) 3059 return -EINVAL; 3060 3061 /* Add one to the number of streams requested to account for 3062 * stream 0 that is reserved for xHCI usage. 3063 */ 3064 num_streams += 1; 3065 xhci = hcd_to_xhci(hcd); 3066 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 3067 num_streams); 3068 3069 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 3070 if (!config_cmd) { 3071 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 3072 return -ENOMEM; 3073 } 3074 3075 /* Check to make sure all endpoints are not already configured for 3076 * streams. While we're at it, find the maximum number of streams that 3077 * all the endpoints will support and check for duplicate endpoints. 3078 */ 3079 spin_lock_irqsave(&xhci->lock, flags); 3080 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 3081 num_eps, &num_streams, &changed_ep_bitmask); 3082 if (ret < 0) { 3083 xhci_free_command(xhci, config_cmd); 3084 spin_unlock_irqrestore(&xhci->lock, flags); 3085 return ret; 3086 } 3087 if (num_streams <= 1) { 3088 xhci_warn(xhci, "WARN: endpoints can't handle " 3089 "more than one stream.\n"); 3090 xhci_free_command(xhci, config_cmd); 3091 spin_unlock_irqrestore(&xhci->lock, flags); 3092 return -EINVAL; 3093 } 3094 vdev = xhci->devs[udev->slot_id]; 3095 /* Mark each endpoint as being in transition, so 3096 * xhci_urb_enqueue() will reject all URBs. 3097 */ 3098 for (i = 0; i < num_eps; i++) { 3099 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3100 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 3101 } 3102 spin_unlock_irqrestore(&xhci->lock, flags); 3103 3104 /* Setup internal data structures and allocate HW data structures for 3105 * streams (but don't install the HW structures in the input context 3106 * until we're sure all memory allocation succeeded). 3107 */ 3108 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 3109 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 3110 num_stream_ctxs, num_streams); 3111 3112 for (i = 0; i < num_eps; i++) { 3113 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3114 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 3115 num_stream_ctxs, 3116 num_streams, mem_flags); 3117 if (!vdev->eps[ep_index].stream_info) 3118 goto cleanup; 3119 /* Set maxPstreams in endpoint context and update deq ptr to 3120 * point to stream context array. FIXME 3121 */ 3122 } 3123 3124 /* Set up the input context for a configure endpoint command. */ 3125 for (i = 0; i < num_eps; i++) { 3126 struct xhci_ep_ctx *ep_ctx; 3127 3128 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3129 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 3130 3131 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 3132 vdev->out_ctx, ep_index); 3133 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 3134 vdev->eps[ep_index].stream_info); 3135 } 3136 /* Tell the HW to drop its old copy of the endpoint context info 3137 * and add the updated copy from the input context. 
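 * Setting the same bit in both the drop and the add flags marks an
 * endpoint as "changed": the old context is dropped and the new one from
 * the input context is installed by a single command.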
3138 */ 3139 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 3140 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); 3141 3142 /* Issue and wait for the configure endpoint command */ 3143 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 3144 false, false); 3145 3146 /* xHC rejected the configure endpoint command for some reason, so we 3147 * leave the old ring intact and free our internal streams data 3148 * structure. 3149 */ 3150 if (ret < 0) 3151 goto cleanup; 3152 3153 spin_lock_irqsave(&xhci->lock, flags); 3154 for (i = 0; i < num_eps; i++) { 3155 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3156 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3157 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 3158 udev->slot_id, ep_index); 3159 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 3160 } 3161 xhci_free_command(xhci, config_cmd); 3162 spin_unlock_irqrestore(&xhci->lock, flags); 3163 3164 /* Subtract 1 for stream 0, which drivers can't use */ 3165 return num_streams - 1; 3166 3167 cleanup: 3168 /* If it didn't work, free the streams! */ 3169 for (i = 0; i < num_eps; i++) { 3170 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3171 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3172 vdev->eps[ep_index].stream_info = NULL; 3173 /* FIXME Unset maxPstreams in endpoint context and 3174 * update deq ptr to point to normal string ring. 3175 */ 3176 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3177 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3178 xhci_endpoint_zero(xhci, vdev, eps[i]); 3179 } 3180 xhci_free_command(xhci, config_cmd); 3181 return -ENOMEM; 3182 } 3183 3184 /* Transition the endpoint from using streams to being a "normal" endpoint 3185 * without streams. 3186 * 3187 * Modify the endpoint context state, submit a configure endpoint command, 3188 * and free all endpoint rings for streams if that completes successfully. 3189 */ 3190 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, 3191 struct usb_host_endpoint **eps, unsigned int num_eps, 3192 gfp_t mem_flags) 3193 { 3194 int i, ret; 3195 struct xhci_hcd *xhci; 3196 struct xhci_virt_device *vdev; 3197 struct xhci_command *command; 3198 unsigned int ep_index; 3199 unsigned long flags; 3200 u32 changed_ep_bitmask; 3201 3202 xhci = hcd_to_xhci(hcd); 3203 vdev = xhci->devs[udev->slot_id]; 3204 3205 /* Set up a configure endpoint command to remove the streams rings */ 3206 spin_lock_irqsave(&xhci->lock, flags); 3207 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, 3208 udev, eps, num_eps); 3209 if (changed_ep_bitmask == 0) { 3210 spin_unlock_irqrestore(&xhci->lock, flags); 3211 return -EINVAL; 3212 } 3213 3214 /* Use the xhci_command structure from the first endpoint. We may have 3215 * allocated too many, but the driver may call xhci_free_streams() for 3216 * each endpoint it grouped into one call to xhci_alloc_streams(). 
3217 */ 3218 ep_index = xhci_get_endpoint_index(&eps[0]->desc); 3219 command = vdev->eps[ep_index].stream_info->free_streams_command; 3220 for (i = 0; i < num_eps; i++) { 3221 struct xhci_ep_ctx *ep_ctx; 3222 3223 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3224 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 3225 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= 3226 EP_GETTING_NO_STREAMS; 3227 3228 xhci_endpoint_copy(xhci, command->in_ctx, 3229 vdev->out_ctx, ep_index); 3230 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, 3231 &vdev->eps[ep_index]); 3232 } 3233 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, 3234 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); 3235 spin_unlock_irqrestore(&xhci->lock, flags); 3236 3237 /* Issue and wait for the configure endpoint command, 3238 * which must succeed. 3239 */ 3240 ret = xhci_configure_endpoint(xhci, udev, command, 3241 false, true); 3242 3243 /* xHC rejected the configure endpoint command for some reason, so we 3244 * leave the streams rings intact. 3245 */ 3246 if (ret < 0) 3247 return ret; 3248 3249 spin_lock_irqsave(&xhci->lock, flags); 3250 for (i = 0; i < num_eps; i++) { 3251 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3252 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3253 vdev->eps[ep_index].stream_info = NULL; 3254 /* FIXME Unset maxPstreams in endpoint context and 3255 * update deq ptr to point to normal string ring. 3256 */ 3257 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; 3258 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3259 } 3260 spin_unlock_irqrestore(&xhci->lock, flags); 3261 3262 return 0; 3263 } 3264 3265 /* 3266 * Deletes endpoint resources for endpoints that were active before a Reset 3267 * Device command, or a Disable Slot command. The Reset Device command leaves 3268 * the control endpoint intact, whereas the Disable Slot command deletes it. 3269 * 3270 * Must be called with xhci->lock held. 3271 */ 3272 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, 3273 struct xhci_virt_device *virt_dev, bool drop_control_ep) 3274 { 3275 int i; 3276 unsigned int num_dropped_eps = 0; 3277 unsigned int drop_flags = 0; 3278 3279 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { 3280 if (virt_dev->eps[i].ring) { 3281 drop_flags |= 1 << i; 3282 num_dropped_eps++; 3283 } 3284 } 3285 xhci->num_active_eps -= num_dropped_eps; 3286 if (num_dropped_eps) 3287 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " 3288 "%u now active.\n", 3289 num_dropped_eps, drop_flags, 3290 xhci->num_active_eps); 3291 } 3292 3293 /* 3294 * This submits a Reset Device Command, which will set the device state to 0, 3295 * set the device address to 0, and disable all the endpoints except the default 3296 * control endpoint. The USB core should come back and call 3297 * xhci_address_device(), and then re-set up the configuration. If this is 3298 * called because of a usb_reset_and_verify_device(), then the old alternate 3299 * settings will be re-installed through the normal bandwidth allocation 3300 * functions. 3301 * 3302 * Wait for the Reset Device command to finish. Remove all structures 3303 * associated with the endpoints that were disabled. Clear the input device 3304 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 3305 * 3306 * If the virt_dev to be reset does not exist or does not match the udev, 3307 * it means the device is lost, possibly due to the xHC restore error and 3308 * re-initialization during S3/S4. 
In this case, call xhci_alloc_dev() to 3309 * re-allocate the device. 3310 */ 3311 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 3312 { 3313 int ret, i; 3314 unsigned long flags; 3315 struct xhci_hcd *xhci; 3316 unsigned int slot_id; 3317 struct xhci_virt_device *virt_dev; 3318 struct xhci_command *reset_device_cmd; 3319 int timeleft; 3320 int last_freed_endpoint; 3321 struct xhci_slot_ctx *slot_ctx; 3322 int old_active_eps = 0; 3323 3324 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 3325 if (ret <= 0) 3326 return ret; 3327 xhci = hcd_to_xhci(hcd); 3328 slot_id = udev->slot_id; 3329 virt_dev = xhci->devs[slot_id]; 3330 if (!virt_dev) { 3331 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3332 "not exist. Re-allocate the device\n", slot_id); 3333 ret = xhci_alloc_dev(hcd, udev); 3334 if (ret == 1) 3335 return 0; 3336 else 3337 return -EINVAL; 3338 } 3339 3340 if (virt_dev->udev != udev) { 3341 /* If the virt_dev and the udev does not match, this virt_dev 3342 * may belong to another udev. 3343 * Re-allocate the device. 3344 */ 3345 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3346 "not match the udev. Re-allocate the device\n", 3347 slot_id); 3348 ret = xhci_alloc_dev(hcd, udev); 3349 if (ret == 1) 3350 return 0; 3351 else 3352 return -EINVAL; 3353 } 3354 3355 /* If device is not setup, there is no point in resetting it */ 3356 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3357 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3358 SLOT_STATE_DISABLED) 3359 return 0; 3360 3361 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 3362 /* Allocate the command structure that holds the struct completion. 3363 * Assume we're in process context, since the normal device reset 3364 * process has to wait for the device anyway. Storage devices are 3365 * reset as part of error handling, so use GFP_NOIO instead of 3366 * GFP_KERNEL. 3367 */ 3368 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); 3369 if (!reset_device_cmd) { 3370 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3371 return -ENOMEM; 3372 } 3373 3374 /* Attempt to submit the Reset Device command to the command ring */ 3375 spin_lock_irqsave(&xhci->lock, flags); 3376 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 3377 3378 /* Enqueue pointer can be left pointing to the link TRB, 3379 * we must handle that 3380 */ 3381 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) 3382 reset_device_cmd->command_trb = 3383 xhci->cmd_ring->enq_seg->next->trbs; 3384 3385 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 3386 ret = xhci_queue_reset_device(xhci, slot_id); 3387 if (ret) { 3388 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3389 list_del(&reset_device_cmd->cmd_list); 3390 spin_unlock_irqrestore(&xhci->lock, flags); 3391 goto command_cleanup; 3392 } 3393 xhci_ring_cmd_db(xhci); 3394 spin_unlock_irqrestore(&xhci->lock, flags); 3395 3396 /* Wait for the Reset Device command to finish */ 3397 timeleft = wait_for_completion_interruptible_timeout( 3398 reset_device_cmd->completion, 3399 USB_CTRL_SET_TIMEOUT); 3400 if (timeleft <= 0) { 3401 xhci_warn(xhci, "%s while waiting for reset device command\n", 3402 timeleft == 0 ? "Timeout" : "Signal"); 3403 spin_lock_irqsave(&xhci->lock, flags); 3404 /* The timeout might have raced with the event ring handler, so 3405 * only delete from the list if the item isn't poisoned. 
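 * (list_del() writes LIST_POISON1/LIST_POISON2 into the removed entry's
 * next/prev pointers, so a poisoned next pointer means the event handler
 * already removed this command from the list.)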
3406 */ 3407 if (reset_device_cmd->cmd_list.next != LIST_POISON1) 3408 list_del(&reset_device_cmd->cmd_list); 3409 spin_unlock_irqrestore(&xhci->lock, flags); 3410 ret = -ETIME; 3411 goto command_cleanup; 3412 } 3413 3414 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3415 * unless we tried to reset a slot ID that wasn't enabled, 3416 * or the device wasn't in the addressed or configured state. 3417 */ 3418 ret = reset_device_cmd->status; 3419 switch (ret) { 3420 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 3421 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 3422 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", 3423 slot_id, 3424 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3425 xhci_info(xhci, "Not freeing device rings.\n"); 3426 /* Don't treat this as an error. May change my mind later. */ 3427 ret = 0; 3428 goto command_cleanup; 3429 case COMP_SUCCESS: 3430 xhci_dbg(xhci, "Successful reset device command.\n"); 3431 break; 3432 default: 3433 if (xhci_is_vendor_info_code(xhci, ret)) 3434 break; 3435 xhci_warn(xhci, "Unknown completion code %u for " 3436 "reset device command.\n", ret); 3437 ret = -EINVAL; 3438 goto command_cleanup; 3439 } 3440 3441 /* Free up host controller endpoint resources */ 3442 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3443 spin_lock_irqsave(&xhci->lock, flags); 3444 /* Don't delete the default control endpoint resources */ 3445 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3446 spin_unlock_irqrestore(&xhci->lock, flags); 3447 } 3448 3449 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 3450 last_freed_endpoint = 1; 3451 for (i = 1; i < 31; ++i) { 3452 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3453 3454 if (ep->ep_state & EP_HAS_STREAMS) { 3455 xhci_free_stream_info(xhci, ep->stream_info); 3456 ep->stream_info = NULL; 3457 ep->ep_state &= ~EP_HAS_STREAMS; 3458 } 3459 3460 if (ep->ring) { 3461 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 3462 last_freed_endpoint = i; 3463 } 3464 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3465 xhci_drop_ep_from_interval_table(xhci, 3466 &virt_dev->eps[i].bw_info, 3467 virt_dev->bw_table, 3468 udev, 3469 &virt_dev->eps[i], 3470 virt_dev->tt_info); 3471 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3472 } 3473 /* If necessary, update the number of active TTs on this root port */ 3474 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3475 3476 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 3477 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 3478 ret = 0; 3479 3480 command_cleanup: 3481 xhci_free_command(xhci, reset_device_cmd); 3482 return ret; 3483 } 3484 3485 /* 3486 * At this point, the struct usb_device is about to go away, the device has 3487 * disconnected, and all traffic has been stopped and the endpoints have been 3488 * disabled. Free any HC data structures associated with that device. 3489 */ 3490 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3491 { 3492 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3493 struct xhci_virt_device *virt_dev; 3494 unsigned long flags; 3495 u32 state; 3496 int i, ret; 3497 3498 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3499 /* If the host is halted due to driver unload, we still need to free the 3500 * device. 
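 * (xhci_check_args() is expected to return -ENODEV for a halted or dying host, which is why the -ENODEV case is allowed to fall through below.)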
3501 */ 3502 if (ret <= 0 && ret != -ENODEV) 3503 return; 3504 3505 virt_dev = xhci->devs[udev->slot_id]; 3506 3507 /* Stop any wayward timer functions (which may grab the lock) */ 3508 for (i = 0; i < 31; ++i) { 3509 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; 3510 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3511 } 3512 3513 if (udev->usb2_hw_lpm_enabled) { 3514 xhci_set_usb2_hardware_lpm(hcd, udev, 0); 3515 udev->usb2_hw_lpm_enabled = 0; 3516 } 3517 3518 spin_lock_irqsave(&xhci->lock, flags); 3519 /* Don't disable the slot if the host controller is dead. */ 3520 state = xhci_readl(xhci, &xhci->op_regs->status); 3521 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3522 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3523 xhci_free_virt_device(xhci, udev->slot_id); 3524 spin_unlock_irqrestore(&xhci->lock, flags); 3525 return; 3526 } 3527 3528 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { 3529 spin_unlock_irqrestore(&xhci->lock, flags); 3530 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3531 return; 3532 } 3533 xhci_ring_cmd_db(xhci); 3534 spin_unlock_irqrestore(&xhci->lock, flags); 3535 /* 3536 * Event command completion handler will free any data structures 3537 * associated with the slot. XXX Can free sleep? 3538 */ 3539 } 3540 3541 /* 3542 * Checks if we have enough host controller resources for the default control 3543 * endpoint. 3544 * 3545 * Must be called with xhci->lock held. 3546 */ 3547 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3548 { 3549 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3550 xhci_dbg(xhci, "Not enough ep ctxs: " 3551 "%u active, need to add 1, limit is %u.\n", 3552 xhci->num_active_eps, xhci->limit_active_eps); 3553 return -ENOMEM; 3554 } 3555 xhci->num_active_eps += 1; 3556 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", 3557 xhci->num_active_eps); 3558 return 0; 3559 } 3560 3561 3562 /* 3563 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3564 * timed out, or allocating memory failed. Returns 1 on success. 3565 */ 3566 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3567 { 3568 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3569 unsigned long flags; 3570 int timeleft; 3571 int ret; 3572 union xhci_trb *cmd_trb; 3573 3574 spin_lock_irqsave(&xhci->lock, flags); 3575 cmd_trb = xhci->cmd_ring->dequeue; 3576 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3577 if (ret) { 3578 spin_unlock_irqrestore(&xhci->lock, flags); 3579 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3580 return 0; 3581 } 3582 xhci_ring_cmd_db(xhci); 3583 spin_unlock_irqrestore(&xhci->lock, flags); 3584 3585 /* XXX: how much time for xHC slot assignment? */ 3586 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 3587 XHCI_CMD_DEFAULT_TIMEOUT); 3588 if (timeleft <= 0) { 3589 xhci_warn(xhci, "%s while waiting for a slot\n", 3590 timeleft == 0 ? 
"Timeout" : "Signal"); 3591 /* cancel the enable slot request */ 3592 return xhci_cancel_cmd(xhci, NULL, cmd_trb); 3593 } 3594 3595 if (!xhci->slot_id) { 3596 xhci_err(xhci, "Error while assigning device slot ID\n"); 3597 return 0; 3598 } 3599 3600 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3601 spin_lock_irqsave(&xhci->lock, flags); 3602 ret = xhci_reserve_host_control_ep_resources(xhci); 3603 if (ret) { 3604 spin_unlock_irqrestore(&xhci->lock, flags); 3605 xhci_warn(xhci, "Not enough host resources, " 3606 "active endpoint contexts = %u\n", 3607 xhci->num_active_eps); 3608 goto disable_slot; 3609 } 3610 spin_unlock_irqrestore(&xhci->lock, flags); 3611 } 3612 /* Use GFP_NOIO, since this function can be called from 3613 * xhci_discover_or_reset_device(), which may be called as part of 3614 * mass storage driver error handling. 3615 */ 3616 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 3617 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3618 goto disable_slot; 3619 } 3620 udev->slot_id = xhci->slot_id; 3621 /* Is this a LS or FS device under a HS hub? */ 3622 /* Hub or peripherial? */ 3623 return 1; 3624 3625 disable_slot: 3626 /* Disable slot, if we can do it without mem alloc */ 3627 spin_lock_irqsave(&xhci->lock, flags); 3628 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) 3629 xhci_ring_cmd_db(xhci); 3630 spin_unlock_irqrestore(&xhci->lock, flags); 3631 return 0; 3632 } 3633 3634 /* 3635 * Issue an Address Device command (which will issue a SetAddress request to 3636 * the device). 3637 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so 3638 * we should only issue and wait on one address command at the same time. 3639 * 3640 * We add one to the device address issued by the hardware because the USB core 3641 * uses address 1 for the root hubs (even though they're not really devices). 3642 */ 3643 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 3644 { 3645 unsigned long flags; 3646 int timeleft; 3647 struct xhci_virt_device *virt_dev; 3648 int ret = 0; 3649 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3650 struct xhci_slot_ctx *slot_ctx; 3651 struct xhci_input_control_ctx *ctrl_ctx; 3652 u64 temp_64; 3653 union xhci_trb *cmd_trb; 3654 3655 if (!udev->slot_id) { 3656 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 3657 return -EINVAL; 3658 } 3659 3660 virt_dev = xhci->devs[udev->slot_id]; 3661 3662 if (WARN_ON(!virt_dev)) { 3663 /* 3664 * In plug/unplug torture test with an NEC controller, 3665 * a zero-dereference was observed once due to virt_dev = 0. 3666 * Print useful debug rather than crash if it is observed again! 3667 */ 3668 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3669 udev->slot_id); 3670 return -EINVAL; 3671 } 3672 3673 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3674 /* 3675 * If this is the first Set Address since device plug-in or 3676 * virt_device realloaction after a resume with an xHCI power loss, 3677 * then set up the slot context. 3678 */ 3679 if (!slot_ctx->dev_info) 3680 xhci_setup_addressable_virt_dev(xhci, udev); 3681 /* Otherwise, update the control endpoint ring enqueue pointer. 
*/ 3682 else 3683 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3684 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 3685 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 3686 ctrl_ctx->drop_flags = 0; 3687 3688 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3689 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3690 3691 spin_lock_irqsave(&xhci->lock, flags); 3692 cmd_trb = xhci->cmd_ring->dequeue; 3693 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 3694 udev->slot_id); 3695 if (ret) { 3696 spin_unlock_irqrestore(&xhci->lock, flags); 3697 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3698 return ret; 3699 } 3700 xhci_ring_cmd_db(xhci); 3701 spin_unlock_irqrestore(&xhci->lock, flags); 3702 3703 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 3704 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 3705 XHCI_CMD_DEFAULT_TIMEOUT); 3706 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 3707 * the SetAddress() recovery interval required by USB and aborting the 3708 * command on a timeout." 3709 */ 3710 if (timeleft <= 0) { 3711 xhci_warn(xhci, "%s while waiting for address device command\n", 3712 timeleft == 0 ? "Timeout" : "Signal"); 3713 /* cancel the address device command */ 3714 ret = xhci_cancel_cmd(xhci, NULL, cmd_trb); 3715 if (ret < 0) 3716 return ret; 3717 return -ETIME; 3718 } 3719 3720 switch (virt_dev->cmd_status) { 3721 case COMP_CTX_STATE: 3722 case COMP_EBADSLT: 3723 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", 3724 udev->slot_id); 3725 ret = -EINVAL; 3726 break; 3727 case COMP_TX_ERR: 3728 dev_warn(&udev->dev, "Device not responding to set address.\n"); 3729 ret = -EPROTO; 3730 break; 3731 case COMP_DEV_ERR: 3732 dev_warn(&udev->dev, "ERROR: Incompatible device for address " 3733 "device command.\n"); 3734 ret = -ENODEV; 3735 break; 3736 case COMP_SUCCESS: 3737 xhci_dbg(xhci, "Successful Address Device command\n"); 3738 break; 3739 default: 3740 xhci_err(xhci, "ERROR: unexpected command completion " 3741 "code 0x%x.\n", virt_dev->cmd_status); 3742 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3743 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3744 ret = -EINVAL; 3745 break; 3746 } 3747 if (ret) 3748 return ret; 3749 3750 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 3751 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); 3752 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", 3753 udev->slot_id, 3754 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 3755 (unsigned long long) 3756 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 3757 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 3758 (unsigned long long)virt_dev->out_ctx->dma); 3759 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3760 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3761 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3762 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3763 /* 3764 * The USB core uses address 1 for the roothubs, so we add one to the 3765 * address given back to us by the HC. 3766 */ 3767 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3768 /* Use the kernel-assigned address for devices; store the xHC-assigned 3769 * address locally.
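 * For example, if the xHC assigned USB address 1 to this device, virt_dev->address is stored as 2, because the core reserves address 1 for the root hub.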
*/ 3770 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) 3771 + 1; 3772 /* Zero the input context control for later use */ 3773 ctrl_ctx->add_flags = 0; 3774 ctrl_ctx->drop_flags = 0; 3775 3776 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); 3777 3778 return 0; 3779 } 3780 3781 #ifdef CONFIG_USB_SUSPEND 3782 3783 /* BESL to HIRD Encoding array for USB2 LPM */ 3784 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 3785 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 3786 3787 /* Calculate HIRD/BESL for USB2 PORTPMSC */ 3788 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 3789 struct usb_device *udev) 3790 { 3791 int u2del, besl, besl_host; 3792 int besl_device = 0; 3793 u32 field; 3794 3795 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 3796 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 3797 3798 if (field & USB_BESL_SUPPORT) { 3799 for (besl_host = 0; besl_host < 16; besl_host++) { 3800 if (xhci_besl_encoding[besl_host] >= u2del) 3801 break; 3802 } 3803 /* Use baseline BESL value as default */ 3804 if (field & USB_BESL_BASELINE_VALID) 3805 besl_device = USB_GET_BESL_BASELINE(field); 3806 else if (field & USB_BESL_DEEP_VALID) 3807 besl_device = USB_GET_BESL_DEEP(field); 3808 } else { 3809 if (u2del <= 50) 3810 besl_host = 0; 3811 else 3812 besl_host = (u2del - 51) / 75 + 1; 3813 } 3814 3815 besl = besl_host + besl_device; 3816 if (besl > 15) 3817 besl = 15; 3818 3819 return besl; 3820 } 3821 3822 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd, 3823 struct usb_device *udev) 3824 { 3825 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3826 struct dev_info *dev_info; 3827 __le32 __iomem **port_array; 3828 __le32 __iomem *addr, *pm_addr; 3829 u32 temp, dev_id; 3830 unsigned int port_num; 3831 unsigned long flags; 3832 int hird; 3833 int ret; 3834 3835 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || 3836 !udev->lpm_capable) 3837 return -EINVAL; 3838 3839 /* So far, we only support LPM for non-hub devices connected directly to a root hub */ 3840 if (!udev->parent || udev->parent->parent || 3841 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 3842 return -EINVAL; 3843 3844 spin_lock_irqsave(&xhci->lock, flags); 3845 3846 /* Look for devices in the lpm_failed_devs list */ 3847 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 | 3848 le16_to_cpu(udev->descriptor.idProduct); 3849 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) { 3850 if (dev_info->dev_id == dev_id) { 3851 ret = -EINVAL; 3852 goto finish; 3853 } 3854 } 3855 3856 port_array = xhci->usb2_ports; 3857 port_num = udev->portnum - 1; 3858 3859 if (port_num >= HCS_MAX_PORTS(xhci->hcs_params1)) { 3860 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum); 3861 ret = -EINVAL; 3862 goto finish; 3863 } 3864 3865 /* 3866 * Test USB 2.0 software LPM. 3867 * FIXME: some xHCI 1.0 hosts may implement a new register to set up 3868 * hardware-controlled USB 2.0 LPM. See sections 5.4.11 and 4.23.5.1.1.1 3869 * in the June 2011 errata release. 3870 */ 3871 xhci_dbg(xhci, "test port %d software LPM\n", port_num); 3872 /* 3873 * Set L1 Device Slot and HIRD/BESL. 3874 * Check the device's USB 2.0 extension descriptor to determine whether 3875 * HIRD or BESL should be used. See the USB 2.0 LPM errata.
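 * Illustrative example: with a host U2 exit latency (u2del) of 400 and a device reporting a valid baseline BESL of 2, xhci_calculate_hird_besl() above picks besl_host = 4 (the first encoding >= 400) and returns a HIRD/BESL value of 6.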
3876 */ 3877 pm_addr = port_array[port_num] + 1; 3878 hird = xhci_calculate_hird_besl(xhci, udev); 3879 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird); 3880 xhci_writel(xhci, temp, pm_addr); 3881 3882 /* Set port link state to U2(L1) */ 3883 addr = port_array[port_num]; 3884 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2); 3885 3886 /* wait for ACK */ 3887 spin_unlock_irqrestore(&xhci->lock, flags); 3888 msleep(10); 3889 spin_lock_irqsave(&xhci->lock, flags); 3890 3891 /* Check L1 Status */ 3892 ret = xhci_handshake(xhci, pm_addr, 3893 PORT_L1S_MASK, PORT_L1S_SUCCESS, 125); 3894 if (ret != -ETIMEDOUT) { 3895 /* enter L1 successfully */ 3896 temp = xhci_readl(xhci, addr); 3897 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n", 3898 port_num, temp); 3899 ret = 0; 3900 } else { 3901 temp = xhci_readl(xhci, pm_addr); 3902 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n", 3903 port_num, temp & PORT_L1S_MASK); 3904 ret = -EINVAL; 3905 } 3906 3907 /* Resume the port */ 3908 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0); 3909 3910 spin_unlock_irqrestore(&xhci->lock, flags); 3911 msleep(10); 3912 spin_lock_irqsave(&xhci->lock, flags); 3913 3914 /* Clear PLC */ 3915 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC); 3916 3917 /* Check PORTSC to make sure the device is in the right state */ 3918 if (!ret) { 3919 temp = xhci_readl(xhci, addr); 3920 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp); 3921 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) || 3922 (temp & PORT_PLS_MASK) != XDEV_U0) { 3923 xhci_dbg(xhci, "port L1 resume fail\n"); 3924 ret = -EINVAL; 3925 } 3926 } 3927 3928 if (ret) { 3929 /* Insert dev to lpm_failed_devs list */ 3930 xhci_warn(xhci, "device LPM test failed, may disconnect and " 3931 "re-enumerate\n"); 3932 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC); 3933 if (!dev_info) { 3934 ret = -ENOMEM; 3935 goto finish; 3936 } 3937 dev_info->dev_id = dev_id; 3938 INIT_LIST_HEAD(&dev_info->list); 3939 list_add(&dev_info->list, &xhci->lpm_failed_devs); 3940 } else { 3941 xhci_ring_device(xhci, udev->slot_id); 3942 } 3943 3944 finish: 3945 spin_unlock_irqrestore(&xhci->lock, flags); 3946 return ret; 3947 } 3948 3949 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 3950 struct usb_device *udev, int enable) 3951 { 3952 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3953 __le32 __iomem **port_array; 3954 __le32 __iomem *pm_addr; 3955 u32 temp; 3956 unsigned int port_num; 3957 unsigned long flags; 3958 int hird; 3959 3960 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || 3961 !udev->lpm_capable) 3962 return -EPERM; 3963 3964 if (!udev->parent || udev->parent->parent || 3965 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 3966 return -EPERM; 3967 3968 if (udev->usb2_hw_lpm_capable != 1) 3969 return -EPERM; 3970 3971 spin_lock_irqsave(&xhci->lock, flags); 3972 3973 port_array = xhci->usb2_ports; 3974 port_num = udev->portnum - 1; 3975 pm_addr = port_array[port_num] + 1; 3976 temp = xhci_readl(xhci, pm_addr); 3977 3978 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 3979 enable ? 
"enable" : "disable", port_num); 3980 3981 hird = xhci_calculate_hird_besl(xhci, udev); 3982 3983 if (enable) { 3984 temp &= ~PORT_HIRD_MASK; 3985 temp |= PORT_HIRD(hird) | PORT_RWE; 3986 xhci_writel(xhci, temp, pm_addr); 3987 temp = xhci_readl(xhci, pm_addr); 3988 temp |= PORT_HLE; 3989 xhci_writel(xhci, temp, pm_addr); 3990 } else { 3991 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK); 3992 xhci_writel(xhci, temp, pm_addr); 3993 } 3994 3995 spin_unlock_irqrestore(&xhci->lock, flags); 3996 return 0; 3997 } 3998 3999 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4000 { 4001 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4002 int ret; 4003 4004 ret = xhci_usb2_software_lpm_test(hcd, udev); 4005 if (!ret) { 4006 xhci_dbg(xhci, "software LPM test succeed\n"); 4007 if (xhci->hw_lpm_support == 1) { 4008 udev->usb2_hw_lpm_capable = 1; 4009 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1); 4010 if (!ret) 4011 udev->usb2_hw_lpm_enabled = 1; 4012 } 4013 } 4014 4015 return 0; 4016 } 4017 4018 #else 4019 4020 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4021 struct usb_device *udev, int enable) 4022 { 4023 return 0; 4024 } 4025 4026 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4027 { 4028 return 0; 4029 } 4030 4031 #endif /* CONFIG_USB_SUSPEND */ 4032 4033 /*---------------------- USB 3.0 Link PM functions ------------------------*/ 4034 4035 #ifdef CONFIG_PM 4036 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ 4037 static unsigned long long xhci_service_interval_to_ns( 4038 struct usb_endpoint_descriptor *desc) 4039 { 4040 return (1ULL << (desc->bInterval - 1)) * 125 * 1000; 4041 } 4042 4043 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, 4044 enum usb3_link_state state) 4045 { 4046 unsigned long long sel; 4047 unsigned long long pel; 4048 unsigned int max_sel_pel; 4049 char *state_name; 4050 4051 switch (state) { 4052 case USB3_LPM_U1: 4053 /* Convert SEL and PEL stored in nanoseconds to microseconds */ 4054 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 4055 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 4056 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; 4057 state_name = "U1"; 4058 break; 4059 case USB3_LPM_U2: 4060 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); 4061 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); 4062 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; 4063 state_name = "U2"; 4064 break; 4065 default: 4066 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 4067 __func__); 4068 return USB3_LPM_DISABLED; 4069 } 4070 4071 if (sel <= max_sel_pel && pel <= max_sel_pel) 4072 return USB3_LPM_DEVICE_INITIATED; 4073 4074 if (sel > max_sel_pel) 4075 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4076 "due to long SEL %llu ms\n", 4077 state_name, sel); 4078 else 4079 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4080 "due to long PEL %llu\n ms", 4081 state_name, pel); 4082 return USB3_LPM_DISABLED; 4083 } 4084 4085 /* Returns the hub-encoded U1 timeout value. 
4086 * The U1 timeout should be the maximum of the following values: 4087 * - For control endpoints, U1 system exit latency (SEL) * 3 4088 * - For bulk endpoints, U1 SEL * 5 4089 * - For interrupt endpoints: 4090 * - Notification EPs, U1 SEL * 3 4091 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) 4092 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) 4093 */ 4094 static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev, 4095 struct usb_endpoint_descriptor *desc) 4096 { 4097 unsigned long long timeout_ns; 4098 int ep_type; 4099 int intr_type; 4100 4101 ep_type = usb_endpoint_type(desc); 4102 switch (ep_type) { 4103 case USB_ENDPOINT_XFER_CONTROL: 4104 timeout_ns = udev->u1_params.sel * 3; 4105 break; 4106 case USB_ENDPOINT_XFER_BULK: 4107 timeout_ns = udev->u1_params.sel * 5; 4108 break; 4109 case USB_ENDPOINT_XFER_INT: 4110 intr_type = usb_endpoint_interrupt_type(desc); 4111 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { 4112 timeout_ns = udev->u1_params.sel * 3; 4113 break; 4114 } 4115 /* Otherwise the calculation is the same as for isoc eps; fall through. */ 4116 case USB_ENDPOINT_XFER_ISOC: 4117 timeout_ns = xhci_service_interval_to_ns(desc); 4118 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); 4119 if (timeout_ns < udev->u1_params.sel * 2) 4120 timeout_ns = udev->u1_params.sel * 2; 4121 break; 4122 default: 4123 return 0; 4124 } 4125 4126 /* The U1 timeout is encoded in 1us intervals. */ 4127 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); 4128 /* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */ 4129 if (timeout_ns == USB3_LPM_DISABLED) 4130 timeout_ns++; 4131 4132 /* If the necessary timeout value is bigger than what we can set in the 4133 * USB 3.0 hub, we have to disable hub-initiated U1. 4134 */ 4135 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) 4136 return timeout_ns; 4137 dev_dbg(&udev->dev, "Hub-initiated U1 disabled " 4138 "due to long timeout %llu us\n", timeout_ns); 4139 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); 4140 } 4141 4142 /* Returns the hub-encoded U2 timeout value. 4143 * The U2 timeout should be the maximum of: 4144 * - 10 ms (to avoid the bandwidth impact on the scheduler) 4145 * - largest bInterval of any active periodic endpoint (to avoid going 4146 * into lower power link states between intervals). 4147 * - the U2 Exit Latency of the device 4148 */ 4149 static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev, 4150 struct usb_endpoint_descriptor *desc) 4151 { 4152 unsigned long long timeout_ns; 4153 unsigned long long u2_del_ns; 4154 4155 timeout_ns = 10 * 1000 * 1000; 4156 4157 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && 4158 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4159 timeout_ns = xhci_service_interval_to_ns(desc); 4160 4161 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; 4162 if (u2_del_ns > timeout_ns) 4163 timeout_ns = u2_del_ns; 4164 4165 /* The U2 timeout is encoded in 256us intervals */ 4166 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4167 /* If the necessary timeout value is bigger than what we can set in the 4168 * USB 3.0 hub, we have to disable hub-initiated U2.
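 * For example, with only bulk endpoints and a small bU2DevExitLat, the 10 ms default dominates and encodes to DIV_ROUND_UP(10000000, 256 * 1000) = 40, i.e. 40 * 256 us ~= 10.24 ms.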
4169 */ 4170 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4171 return timeout_ns; 4172 dev_dbg(&udev->dev, "Hub-initiated U2 disabled " 4173 "due to long timeout %llu (units of 256 us)\n", timeout_ns); 4174 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4175 } 4176 4177 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4178 struct usb_device *udev, 4179 struct usb_endpoint_descriptor *desc, 4180 enum usb3_link_state state, 4181 u16 *timeout) 4182 { 4183 if (state == USB3_LPM_U1) { 4184 if (xhci->quirks & XHCI_INTEL_HOST) 4185 return xhci_calculate_intel_u1_timeout(udev, desc); 4186 } else { 4187 if (xhci->quirks & XHCI_INTEL_HOST) 4188 return xhci_calculate_intel_u2_timeout(udev, desc); 4189 } 4190 4191 return USB3_LPM_DISABLED; 4192 } 4193 4194 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4195 struct usb_device *udev, 4196 struct usb_endpoint_descriptor *desc, 4197 enum usb3_link_state state, 4198 u16 *timeout) 4199 { 4200 u16 alt_timeout; 4201 4202 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 4203 desc, state, timeout); 4204 4205 /* If we found we can't enable hub-initiated LPM, or 4206 * the U1 or U2 exit latency was too high to allow 4207 * device-initiated LPM as well, just stop searching. 4208 */ 4209 if (alt_timeout == USB3_LPM_DISABLED || 4210 alt_timeout == USB3_LPM_DEVICE_INITIATED) { 4211 *timeout = alt_timeout; 4212 return -E2BIG; 4213 } 4214 if (alt_timeout > *timeout) 4215 *timeout = alt_timeout; 4216 return 0; 4217 } 4218 4219 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 4220 struct usb_device *udev, 4221 struct usb_host_interface *alt, 4222 enum usb3_link_state state, 4223 u16 *timeout) 4224 { 4225 int j; 4226 4227 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 4228 if (xhci_update_timeout_for_endpoint(xhci, udev, 4229 &alt->endpoint[j].desc, state, timeout)) 4230 return -E2BIG; 4231 4232 } 4233 return 0; 4234 } 4235 4236 static int xhci_check_intel_tier_policy(struct usb_device *udev, 4237 enum usb3_link_state state) 4238 { 4239 struct usb_device *parent; 4240 unsigned int num_hubs; 4241 4242 if (state == USB3_LPM_U2) 4243 return 0; 4244 4245 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ 4246 for (parent = udev->parent, num_hubs = 0; parent->parent; 4247 parent = parent->parent) 4248 num_hubs++; 4249 4250 if (num_hubs < 2) 4251 return 0; 4252 4253 dev_dbg(&udev->dev, "Disabling U1 link state for device" 4254 " below second-tier hub.\n"); 4255 dev_dbg(&udev->dev, "Plug device into first-tier hub " 4256 "to decrease power consumption.\n"); 4257 return -E2BIG; 4258 } 4259 4260 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 4261 struct usb_device *udev, 4262 enum usb3_link_state state) 4263 { 4264 if (xhci->quirks & XHCI_INTEL_HOST) 4265 return xhci_check_intel_tier_policy(udev, state); 4266 return -EINVAL; 4267 } 4268 4269 /* Returns the U1 or U2 timeout that should be enabled. 4270 * If the tier check or timeout setting functions return with a non-zero exit 4271 * code, that means the timeout value has been finalized and we shouldn't look 4272 * at any more endpoints.
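 * (xhci_update_timeout_for_endpoint() returns -E2BIG in that case, with *timeout already holding the final value.)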
4273 */ 4274 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 4275 struct usb_device *udev, enum usb3_link_state state) 4276 { 4277 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4278 struct usb_host_config *config; 4279 char *state_name; 4280 int i; 4281 u16 timeout = USB3_LPM_DISABLED; 4282 4283 if (state == USB3_LPM_U1) 4284 state_name = "U1"; 4285 else if (state == USB3_LPM_U2) 4286 state_name = "U2"; 4287 else { 4288 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 4289 state); 4290 return timeout; 4291 } 4292 4293 if (xhci_check_tier_policy(xhci, udev, state) < 0) 4294 return timeout; 4295 4296 /* Gather some information about the currently installed configuration 4297 * and alternate interface settings. 4298 */ 4299 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 4300 state, &timeout)) 4301 return timeout; 4302 4303 config = udev->actconfig; 4304 if (!config) 4305 return timeout; 4306 4307 for (i = 0; i < USB_MAXINTERFACES; i++) { 4308 struct usb_driver *driver; 4309 struct usb_interface *intf = config->interface[i]; 4310 4311 if (!intf) 4312 continue; 4313 4314 /* Check if any currently bound drivers want hub-initiated LPM 4315 * disabled. 4316 */ 4317 if (intf->dev.driver) { 4318 driver = to_usb_driver(intf->dev.driver); 4319 if (driver && driver->disable_hub_initiated_lpm) { 4320 dev_dbg(&udev->dev, "Hub-initiated %s disabled " 4321 "at request of driver %s\n", 4322 state_name, driver->name); 4323 return xhci_get_timeout_no_hub_lpm(udev, state); 4324 } 4325 } 4326 4327 /* Not sure how this could happen... */ 4328 if (!intf->cur_altsetting) 4329 continue; 4330 4331 if (xhci_update_timeout_for_interface(xhci, udev, 4332 intf->cur_altsetting, 4333 state, &timeout)) 4334 return timeout; 4335 } 4336 return timeout; 4337 } 4338 4339 /* 4340 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 4341 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 4342 */ 4343 static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, 4344 struct usb_device *udev, u16 max_exit_latency) 4345 { 4346 struct xhci_virt_device *virt_dev; 4347 struct xhci_command *command; 4348 struct xhci_input_control_ctx *ctrl_ctx; 4349 struct xhci_slot_ctx *slot_ctx; 4350 unsigned long flags; 4351 int ret; 4352 4353 spin_lock_irqsave(&xhci->lock, flags); 4354 if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { 4355 spin_unlock_irqrestore(&xhci->lock, flags); 4356 return 0; 4357 } 4358 4359 /* Attempt to issue an Evaluate Context command to change the MEL. */ 4360 virt_dev = xhci->devs[udev->slot_id]; 4361 command = xhci->lpm_command; 4362 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); 4363 spin_unlock_irqrestore(&xhci->lock, flags); 4364 4365 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); 4366 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4367 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 4368 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); 4369 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); 4370 4371 xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n"); 4372 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); 4373 xhci_dbg_ctx(xhci, command->in_ctx, 0); 4374 4375 /* Issue and wait for the evaluate context command. 
*/ 4376 ret = xhci_configure_endpoint(xhci, udev, command, 4377 true, true); 4378 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); 4379 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); 4380 4381 if (!ret) { 4382 spin_lock_irqsave(&xhci->lock, flags); 4383 virt_dev->current_mel = max_exit_latency; 4384 spin_unlock_irqrestore(&xhci->lock, flags); 4385 } 4386 return ret; 4387 } 4388 4389 static int calculate_max_exit_latency(struct usb_device *udev, 4390 enum usb3_link_state state_changed, 4391 u16 hub_encoded_timeout) 4392 { 4393 unsigned long long u1_mel_us = 0; 4394 unsigned long long u2_mel_us = 0; 4395 unsigned long long mel_us = 0; 4396 bool disabling_u1; 4397 bool disabling_u2; 4398 bool enabling_u1; 4399 bool enabling_u2; 4400 4401 disabling_u1 = (state_changed == USB3_LPM_U1 && 4402 hub_encoded_timeout == USB3_LPM_DISABLED); 4403 disabling_u2 = (state_changed == USB3_LPM_U2 && 4404 hub_encoded_timeout == USB3_LPM_DISABLED); 4405 4406 enabling_u1 = (state_changed == USB3_LPM_U1 && 4407 hub_encoded_timeout != USB3_LPM_DISABLED); 4408 enabling_u2 = (state_changed == USB3_LPM_U2 && 4409 hub_encoded_timeout != USB3_LPM_DISABLED); 4410 4411 /* If U1 was already enabled and we're not disabling it, 4412 * or we're going to enable U1, account for the U1 max exit latency. 4413 */ 4414 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 4415 enabling_u1) 4416 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 4417 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 4418 enabling_u2) 4419 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 4420 4421 if (u1_mel_us > u2_mel_us) 4422 mel_us = u1_mel_us; 4423 else 4424 mel_us = u2_mel_us; 4425 /* xHCI host controller max exit latency field is only 16 bits wide. */ 4426 if (mel_us > MAX_EXIT) { 4427 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 4428 "is too big.\n", mel_us); 4429 return -E2BIG; 4430 } 4431 return mel_us; 4432 } 4433 4434 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ 4435 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4436 struct usb_device *udev, enum usb3_link_state state) 4437 { 4438 struct xhci_hcd *xhci; 4439 u16 hub_encoded_timeout; 4440 int mel; 4441 int ret; 4442 4443 xhci = hcd_to_xhci(hcd); 4444 /* The LPM timeout values are pretty host-controller specific, so don't 4445 * enable hub-initiated timeouts unless the vendor has provided 4446 * information about their timeout algorithm. 4447 */ 4448 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4449 !xhci->devs[udev->slot_id]) 4450 return USB3_LPM_DISABLED; 4451 4452 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 4453 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 4454 if (mel < 0) { 4455 /* Max Exit Latency is too big, disable LPM. 
*/ 4456 hub_encoded_timeout = USB3_LPM_DISABLED; 4457 mel = 0; 4458 } 4459 4460 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4461 if (ret) 4462 return ret; 4463 return hub_encoded_timeout; 4464 } 4465 4466 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4467 struct usb_device *udev, enum usb3_link_state state) 4468 { 4469 struct xhci_hcd *xhci; 4470 u16 mel; 4471 int ret; 4472 4473 xhci = hcd_to_xhci(hcd); 4474 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4475 !xhci->devs[udev->slot_id]) 4476 return 0; 4477 4478 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 4479 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4480 if (ret) 4481 return ret; 4482 return 0; 4483 } 4484 #else /* CONFIG_PM */ 4485 4486 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4487 struct usb_device *udev, enum usb3_link_state state) 4488 { 4489 return USB3_LPM_DISABLED; 4490 } 4491 4492 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4493 struct usb_device *udev, enum usb3_link_state state) 4494 { 4495 return 0; 4496 } 4497 #endif /* CONFIG_PM */ 4498 4499 /*-------------------------------------------------------------------------*/ 4500 4501 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 4502 * internal data structures for the device. 4503 */ 4504 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 4505 struct usb_tt *tt, gfp_t mem_flags) 4506 { 4507 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4508 struct xhci_virt_device *vdev; 4509 struct xhci_command *config_cmd; 4510 struct xhci_input_control_ctx *ctrl_ctx; 4511 struct xhci_slot_ctx *slot_ctx; 4512 unsigned long flags; 4513 unsigned think_time; 4514 int ret; 4515 4516 /* Ignore root hubs */ 4517 if (!hdev->parent) 4518 return 0; 4519 4520 vdev = xhci->devs[hdev->slot_id]; 4521 if (!vdev) { 4522 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 4523 return -EINVAL; 4524 } 4525 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 4526 if (!config_cmd) { 4527 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 4528 return -ENOMEM; 4529 } 4530 4531 spin_lock_irqsave(&xhci->lock, flags); 4532 if (hdev->speed == USB_SPEED_HIGH && 4533 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 4534 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 4535 xhci_free_command(xhci, config_cmd); 4536 spin_unlock_irqrestore(&xhci->lock, flags); 4537 return -ENOMEM; 4538 } 4539 4540 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 4541 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 4542 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4543 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 4544 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 4545 if (tt->multi) 4546 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 4547 if (xhci->hci_version > 0x95) { 4548 xhci_dbg(xhci, "xHCI version %x needs hub " 4549 "TT think time and number of ports\n", 4550 (unsigned int) xhci->hci_version); 4551 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 4552 /* Set TT think time - convert from ns to FS bit times. 4553 * 0 = 8 FS bit times, 1 = 16 FS bit times, 4554 * 2 = 24 FS bit times, 3 = 32 FS bit times. 4555 * 4556 * xHCI 1.0: this field shall be 0 if the device is not a 4557 * High-speed hub.
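 * For example, a tt->think_time of 1332 ns (16 FS bit times) encodes to (1332 / 666) - 1 = 1.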
4558 */ 4559 think_time = tt->think_time; 4560 if (think_time != 0) 4561 think_time = (think_time / 666) - 1; 4562 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) 4563 slot_ctx->tt_info |= 4564 cpu_to_le32(TT_THINK_TIME(think_time)); 4565 } else { 4566 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 4567 "TT think time or number of ports\n", 4568 (unsigned int) xhci->hci_version); 4569 } 4570 slot_ctx->dev_state = 0; 4571 spin_unlock_irqrestore(&xhci->lock, flags); 4572 4573 xhci_dbg(xhci, "Set up %s for hub device.\n", 4574 (xhci->hci_version > 0x95) ? 4575 "configure endpoint" : "evaluate context"); 4576 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); 4577 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); 4578 4579 /* Issue and wait for the configure endpoint or 4580 * evaluate context command. 4581 */ 4582 if (xhci->hci_version > 0x95) 4583 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4584 false, false); 4585 else 4586 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4587 true, false); 4588 4589 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); 4590 xhci_dbg_ctx(xhci, vdev->out_ctx, 0); 4591 4592 xhci_free_command(xhci, config_cmd); 4593 return ret; 4594 } 4595 4596 int xhci_get_frame(struct usb_hcd *hcd) 4597 { 4598 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4599 /* EHCI mods by the periodic size. Why? */ 4600 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; 4601 } 4602 4603 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) 4604 { 4605 struct xhci_hcd *xhci; 4606 struct device *dev = hcd->self.controller; 4607 int retval; 4608 u32 temp; 4609 4610 /* Accept arbitrarily long scatter-gather lists */ 4611 hcd->self.sg_tablesize = ~0; 4612 /* XHCI controllers don't stop the ep queue on short packets :| */ 4613 hcd->self.no_stop_on_short = 1; 4614 4615 if (usb_hcd_is_primary_hcd(hcd)) { 4616 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); 4617 if (!xhci) 4618 return -ENOMEM; 4619 *((struct xhci_hcd **) hcd->hcd_priv) = xhci; 4620 xhci->main_hcd = hcd; 4621 /* Mark the first roothub as being USB 2.0. 4622 * The xHCI driver will register the USB 3.0 roothub. 4623 */ 4624 hcd->speed = HCD_USB2; 4625 hcd->self.root_hub->speed = USB_SPEED_HIGH; 4626 /* 4627 * USB 2.0 roothub under xHCI has an integrated TT, 4628 * (rate matching hub) as opposed to having an OHCI/UHCI 4629 * companion controller. 4630 */ 4631 hcd->has_tt = 1; 4632 } else { 4633 /* xHCI private pointer was set in xhci_pci_probe for the second 4634 * registered roothub. 
4635 */ 4636 xhci = hcd_to_xhci(hcd); 4637 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4638 if (HCC_64BIT_ADDR(temp)) { 4639 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4640 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 4641 } else { 4642 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); 4643 } 4644 return 0; 4645 } 4646 4647 xhci->cap_regs = hcd->regs; 4648 xhci->op_regs = hcd->regs + 4649 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); 4650 xhci->run_regs = hcd->regs + 4651 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 4652 /* Cache read-only capability registers */ 4653 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); 4654 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); 4655 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); /* The HCI version lives in the upper bits of hc_capbase; stash that read in hcc_params just long enough to extract it, then load the real HCC parameters. */ 4656 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); 4657 xhci->hci_version = HC_VERSION(xhci->hcc_params); 4658 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4659 xhci_print_registers(xhci); 4660 4661 get_quirks(dev, xhci); 4662 4663 /* Make sure the HC is halted. */ 4664 retval = xhci_halt(xhci); 4665 if (retval) 4666 goto error; 4667 4668 xhci_dbg(xhci, "Resetting HCD\n"); 4669 /* Reset the internal HC memory state and registers. */ 4670 retval = xhci_reset(xhci); 4671 if (retval) 4672 goto error; 4673 xhci_dbg(xhci, "Reset complete\n"); 4674 4675 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4676 if (HCC_64BIT_ADDR(temp)) { 4677 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4678 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 4679 } else { 4680 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); 4681 } 4682 4683 xhci_dbg(xhci, "Calling HCD init\n"); 4684 /* Initialize HCD and host controller data structures. */ 4685 retval = xhci_init(hcd); 4686 if (retval) 4687 goto error; 4688 xhci_dbg(xhci, "Called HCD init\n"); 4689 return 0; 4690 error: 4691 kfree(xhci); 4692 return retval; 4693 } 4694 4695 MODULE_DESCRIPTION(DRIVER_DESC); 4696 MODULE_AUTHOR(DRIVER_AUTHOR); 4697 MODULE_LICENSE("GPL"); 4698 4699 static int __init xhci_hcd_init(void) 4700 { 4701 int retval; 4702 4703 retval = xhci_register_pci(); 4704 if (retval < 0) { 4705 printk(KERN_DEBUG "Problem registering PCI driver.\n"); 4706 return retval; 4707 } 4708 retval = xhci_register_plat(); 4709 if (retval < 0) { 4710 printk(KERN_DEBUG "Problem registering platform driver.\n"); 4711 goto unreg_pci; 4712 } 4713 /* 4714 * Check the compiler-generated sizes of structures that must be laid 4715 * out in specific ways for hardware access.
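 * Each expected size below is written as <number of 32-bit words> * 32 / 8 bytes; e.g. a slot context is eight 32-bit words, so sizeof(struct xhci_slot_ctx) must equal 8 * 32 / 8 = 32 bytes.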
4716 */ 4717 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 4718 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); 4719 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); 4720 /* xhci_device_control has eight fields, and also 4721 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 4722 */ 4723 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 4724 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 4725 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 4726 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); 4727 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); 4728 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ 4729 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 4730 return 0; 4731 unreg_pci: 4732 xhci_unregister_pci(); 4733 return retval; 4734 } 4735 module_init(xhci_hcd_init); 4736 4737 static void __exit xhci_hcd_cleanup(void) 4738 { 4739 xhci_unregister_pci(); 4740 xhci_unregister_plat(); 4741 } 4742 module_exit(xhci_hcd_cleanup); 4743