// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
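
/*
 * Typical usage, as in xhci_halt() below: spin until the HCHalted status
 * bit is set, e.g.
 *	xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *		       XHCI_MAX_HALT_USEC);
 */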

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may (very rarely) result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);
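
	/*
	 * Note the inverted handshake below: CMD_RESET is self-clearing, so
	 * we wait for the bit to read back as 0, rather than waiting for a
	 * status bit to be set as xhci_halt() does.
	 */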
	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; i++) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int err, i;
	u64 val;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			STS_FATAL, STS_FATAL,
			XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	/*
	 * TODO: check with MSI SoC for sysdev
	 */
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported:
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with the max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
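	/*
	 * Worked example (illustrative): on a 4-CPU system whose HCSPARAMS1
	 * reports 8 interrupters, msix_count = min(4 + 1, 8) = 5.
	 */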

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		return ret;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
				"xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	pci_free_irq_vectors(pdev);
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	/* return if using legacy interrupt */
	if (hcd->irq > 0)
		return;

	if (hcd->msix_enabled) {
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	} else {
		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
	}

	pci_free_irq_vectors(pdev);
	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->msix_enabled) {
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(pci_irq_vector(pdev, i));
	}
}
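
/*
 * Interrupt setup policy (see xhci_try_enable_msi() below): try MSI-X first,
 * fall back to MSI, and finally fall back to the legacy shared interrupt
 * line if neither can be enabled.
 */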
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts. Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that causes ports behind that hardware to enter compliance mode sometimes.
 * The quirk creates a timer that polls the link state of each host
 * controller's port every 2 seconds and recovers it by issuing a Warm reset
 * if Compliance mode is detected; otherwise the port becomes "dead" (no
 * device connections or disconnections will be detected anymore). Because no
 * status event is generated when entering compliance mode (per xhci spec),
 * this quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
			0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}
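
/*
 * port_status_u0 keeps one bit per USB3 port, set as each port is seen in
 * U0; e.g. with 4 ports, "all seen" means (1 << 4) - 1 == 0xf.
 */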
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initialize Compliance Mode Recovery data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}
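
/*
 * xhci_run() is invoked once per hcd: first for the primary (USB2) hcd,
 * which does the interrupter/event ring setup below, and then for the
 * shared (USB3) hcd, which short-circuits into xhci_run_finished().
 */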
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
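	/*
	 * The interrupt moderation field counts in 250 ns increments (hence
	 * the division below); e.g. an imod_interval of 40000 ns programs
	 * 40000 / 250 = 160.
	 */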
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");

	xhci_dbc_init(xhci);

	xhci_debugfs_init(xhci);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and clean up memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		/* usb core will free this hcd shortly, unset pointer */
		xhci->shared_hcd = NULL;
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_dbc_exit(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
static void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
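
/*
 * Note on the packing above: the command ring register reserves its low bits
 * for control flags (hence CMD_RING_RSVD_BITS), which is why the dequeue
 * pointer must be 64-byte aligned and why the ring's cycle state can be
 * OR'ed into bit 0.
 */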
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable usb3 ports Wake bits */
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, ports[port_index]->addr);
	}

	/* disable usb2 ports Wake bits */
	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, ports[port_index]->addr);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	xhci_dbc_suspend(xhci);

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);
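
/*
 * Note: xhci_resume() treats "hibernated" as "register state was lost"; the
 * XHCI_RESET_ON_RESUME quirk forces that full re-init path even for a plain
 * resume.
 */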
/*
 * Start the xHC (not bus-specific).
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the
		 * controller restore, so set the timeout to 100 ms; the xHCI
		 * specification doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		if (xhci_pending_portevent(xhci)) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * must always be re-initialized after a system resume, since the
	 * ports can suffer the compliance mode issue again; it doesn't
	 * matter whether any port had entered U0 before the suspend.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
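
/*
 * Worked examples of the index formula: ep 1 OUT -> (1 * 2) + 0 - 1 = 1;
 * ep 1 IN -> (1 * 2) + 1 - 1 = 2; the default control ep 0 -> (0 * 2) = 0.
 */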
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * Non-error returns are a promise to giveback() the urb later; we drop
 * ownership so the next owner (or urb unlink) can get it.
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		return -ESHUTDOWN;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;
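
	/*
	 * For the URB_ZERO_PACKET case above, the second TD carries the
	 * explicit zero-length packet that terminates a maxpacket-aligned
	 * bulk OUT transfer.
	 */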

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			   num_tds * sizeof(struct xhci_td), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
				*ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are two options:
 *
 * 1) If the HC is in the middle of processing the URB to be canceled, we
 *    simply move the ring's dequeue pointer past those TRBs using the Set
 *    Transfer Ring Dequeue Pointer command. This will be the common case,
 *    when drivers timeout on the last submitted URB and attempt to cancel.
 *
 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *    series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
 *    endpoint command, as noted in the xHCI 0.95 errata.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to
 * complete. It also needs to account for multiple cancellations happening
 * at the same time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}
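
	/*
	 * td_on_ring() (defined near the top of this file) walks the ring's
	 * segment list to confirm that the TD's start segment still belongs
	 * to this ring before any of its list pointers are trusted.
	 */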
	/*
	 * Check that the ring has not been re-allocated since the URB was
	 * enqueued. If it has, make sure none of the ring-related pointers
	 * in this URB private data are touched, such as td_list; otherwise
	 * we would overwrite freed data.
	 */
	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].first_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_STOP_CMD_PENDING;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
					__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	if (xhci->quirks & XHCI_MTK_HOST)
		xhci_mtk_drop_ep_quirk(hcd, udev, ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}
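
/*
 * Note that xhci_drop_endpoint() and xhci_add_endpoint() only edit the input
 * context add/drop flags; nothing is issued to the hardware until a later
 * check_bandwidth() call sends the configure endpoint command.
 */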
 */
static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	if (xhci->quirks & XHCI_MTK_HOST) {
		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
		if (ret < 0) {
			xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
			virt_dev->eps[ep_index].new_ring = NULL;
			return ret;
		}
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
1854 */ 1855 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); 1856 1857 /* Store the usb_device pointer for later use */ 1858 ep->hcpriv = udev; 1859 1860 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); 1861 1862 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", 1863 (unsigned int) ep->desc.bEndpointAddress, 1864 udev->slot_id, 1865 (unsigned int) new_drop_flags, 1866 (unsigned int) new_add_flags); 1867 return 0; 1868 } 1869 1870 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) 1871 { 1872 struct xhci_input_control_ctx *ctrl_ctx; 1873 struct xhci_ep_ctx *ep_ctx; 1874 struct xhci_slot_ctx *slot_ctx; 1875 int i; 1876 1877 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 1878 if (!ctrl_ctx) { 1879 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1880 __func__); 1881 return; 1882 } 1883 1884 /* When a device's add flag and drop flag are zero, any subsequent 1885 * configure endpoint command will leave that endpoint's state 1886 * untouched. Make sure we don't leave any old state in the input 1887 * endpoint contexts. 1888 */ 1889 ctrl_ctx->drop_flags = 0; 1890 ctrl_ctx->add_flags = 0; 1891 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 1892 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 1893 /* Endpoint 0 is always valid */ 1894 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); 1895 for (i = 1; i < 31; i++) { 1896 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); 1897 ep_ctx->ep_info = 0; 1898 ep_ctx->ep_info2 = 0; 1899 ep_ctx->deq = 0; 1900 ep_ctx->tx_info = 0; 1901 } 1902 } 1903 1904 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, 1905 struct usb_device *udev, u32 *cmd_status) 1906 { 1907 int ret; 1908 1909 switch (*cmd_status) { 1910 case COMP_COMMAND_ABORTED: 1911 case COMP_COMMAND_RING_STOPPED: 1912 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1913 ret = -ETIME; 1914 break; 1915 case COMP_RESOURCE_ERROR: 1916 dev_warn(&udev->dev, 1917 "Not enough host controller resources for new device state.\n"); 1918 ret = -ENOMEM; 1919 /* FIXME: can we allocate more resources for the HC? */ 1920 break; 1921 case COMP_BANDWIDTH_ERROR: 1922 case COMP_SECONDARY_BANDWIDTH_ERROR: 1923 dev_warn(&udev->dev, 1924 "Not enough bandwidth for new device state.\n"); 1925 ret = -ENOSPC; 1926 /* FIXME: can we go back to the old state? 
*/ 1927 break; 1928 case COMP_TRB_ERROR: 1929 /* the HCD set up something wrong */ 1930 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " 1931 "add flag = 1, " 1932 "and endpoint is not disabled.\n"); 1933 ret = -EINVAL; 1934 break; 1935 case COMP_INCOMPATIBLE_DEVICE_ERROR: 1936 dev_warn(&udev->dev, 1937 "ERROR: Incompatible device for endpoint configure command.\n"); 1938 ret = -ENODEV; 1939 break; 1940 case COMP_SUCCESS: 1941 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1942 "Successful Endpoint Configure command"); 1943 ret = 0; 1944 break; 1945 default: 1946 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 1947 *cmd_status); 1948 ret = -EINVAL; 1949 break; 1950 } 1951 return ret; 1952 } 1953 1954 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 1955 struct usb_device *udev, u32 *cmd_status) 1956 { 1957 int ret; 1958 1959 switch (*cmd_status) { 1960 case COMP_COMMAND_ABORTED: 1961 case COMP_COMMAND_RING_STOPPED: 1962 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 1963 ret = -ETIME; 1964 break; 1965 case COMP_PARAMETER_ERROR: 1966 dev_warn(&udev->dev, 1967 "WARN: xHCI driver setup invalid evaluate context command.\n"); 1968 ret = -EINVAL; 1969 break; 1970 case COMP_SLOT_NOT_ENABLED_ERROR: 1971 dev_warn(&udev->dev, 1972 "WARN: slot not enabled for evaluate context command.\n"); 1973 ret = -EINVAL; 1974 break; 1975 case COMP_CONTEXT_STATE_ERROR: 1976 dev_warn(&udev->dev, 1977 "WARN: invalid context state for evaluate context command.\n"); 1978 ret = -EINVAL; 1979 break; 1980 case COMP_INCOMPATIBLE_DEVICE_ERROR: 1981 dev_warn(&udev->dev, 1982 "ERROR: Incompatible device for evaluate context command.\n"); 1983 ret = -ENODEV; 1984 break; 1985 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: 1986 /* Max Exit Latency too large error */ 1987 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 1988 ret = -EINVAL; 1989 break; 1990 case COMP_SUCCESS: 1991 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1992 "Successful evaluate context command"); 1993 ret = 0; 1994 break; 1995 default: 1996 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 1997 *cmd_status); 1998 ret = -EINVAL; 1999 break; 2000 } 2001 return ret; 2002 } 2003 2004 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, 2005 struct xhci_input_control_ctx *ctrl_ctx) 2006 { 2007 u32 valid_add_flags; 2008 u32 valid_drop_flags; 2009 2010 /* Ignore the slot flag (bit 0), and the default control endpoint flag 2011 * (bit 1). The default control endpoint is added during the Address 2012 * Device command and is never removed until the slot is disabled. 2013 */ 2014 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 2015 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 2016 2017 /* Use hweight32 to count the number of ones in the add flags, or 2018 * number of endpoints added. Don't count endpoints that are changed 2019 * (both added and dropped). 
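 *
 * Worked example with made-up values: valid_add_flags = 0x6 (two endpoints
 * added) and valid_drop_flags = 0x2 (one of those two also dropped, i.e.
 * changed) gives hweight32(0x6) - hweight32(0x6 & 0x2) = 2 - 1 = 1
 * genuinely new endpoint.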
 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes. We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 * - the first configure endpoint command drops more endpoints than it adds
 * - a second configure endpoint command that adds more endpoints is queued
 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The configure endpoint command was failed by the xHC for some other reason,
 * so we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
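 *
 * Illustrative lifecycle with made-up numbers: a command adding 2 endpoints
 * and dropping 3 reserves 2 contexts up front via
 * xhci_reserve_host_resources(). If the command fails,
 * xhci_free_host_resources() returns those 2; if it succeeds, this function
 * subtracts the 3 dropped contexts, for a net change of -1 active contexts.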
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added. Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval. The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling. This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
 * case scenario. Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval. In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets in every microframe.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in. For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so. The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit. This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval. We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval. Then those packets and their associated overhead are just added
 * to the bandwidth used. For example, five packets at interval 1 can place
 * one packet in every microframe (5 >> 2 = 1), and the one packet left over
 * (5 % 4) is carried, doubled, into the interval 2 accounting.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
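	 * (Illustrative: the first periodic endpoint activated behind a
	 * previously idle TT charges an extra TT_HS_OVERHEAD against the root
	 * port's high-speed budget, and xhci_check_tt_bw_table() fails with
	 * -ENOMEM if bw_used + TT_HS_OVERHEAD would exceed HS_BW_LIMIT.)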
2266 */ 2267 if (virt_dev->tt_info) { 2268 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2269 "Recalculating BW for rootport %u", 2270 virt_dev->real_port); 2271 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2272 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2273 "newly activated TT.\n"); 2274 return -ENOMEM; 2275 } 2276 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2277 "Recalculating BW for TT slot %u port %u", 2278 virt_dev->tt_info->slot_id, 2279 virt_dev->tt_info->ttport); 2280 } else { 2281 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2282 "Recalculating BW for rootport %u", 2283 virt_dev->real_port); 2284 } 2285 2286 /* Add in how much bandwidth will be used for interval zero, or the 2287 * rounded max ESIT payload + number of packets * largest overhead. 2288 */ 2289 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 2290 bw_table->interval_bw[0].num_packets * 2291 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 2292 2293 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 2294 unsigned int bw_added; 2295 unsigned int largest_mps; 2296 unsigned int interval_overhead; 2297 2298 /* 2299 * How many packets could we transmit in this interval? 2300 * If packets didn't fit in the previous interval, we will need 2301 * to transmit that many packets twice within this interval. 2302 */ 2303 packets_remaining = 2 * packets_remaining + 2304 bw_table->interval_bw[i].num_packets; 2305 2306 /* Find the largest max packet size of this or the previous 2307 * interval. 2308 */ 2309 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2310 largest_mps = 0; 2311 else { 2312 struct xhci_virt_ep *virt_ep; 2313 struct list_head *ep_entry; 2314 2315 ep_entry = bw_table->interval_bw[i].endpoints.next; 2316 virt_ep = list_entry(ep_entry, 2317 struct xhci_virt_ep, bw_endpoint_list); 2318 /* Convert to blocks, rounding up */ 2319 largest_mps = DIV_ROUND_UP( 2320 virt_ep->bw_info.max_packet_size, 2321 block_size); 2322 } 2323 if (largest_mps > packet_size) 2324 packet_size = largest_mps; 2325 2326 /* Use the larger overhead of this or the previous interval. */ 2327 interval_overhead = xhci_get_largest_overhead( 2328 &bw_table->interval_bw[i]); 2329 if (interval_overhead > overhead) 2330 overhead = interval_overhead; 2331 2332 /* How many packets can we evenly distribute across 2333 * (1 << (i + 1)) possible scheduling opportunities? 2334 */ 2335 packets_transmitted = packets_remaining >> (i + 1); 2336 2337 /* Add in the bandwidth used for those scheduled packets */ 2338 bw_added = packets_transmitted * (overhead + packet_size); 2339 2340 /* How many packets do we have remaining to transmit? */ 2341 packets_remaining = packets_remaining % (1 << (i + 1)); 2342 2343 /* What largest max packet size should those packets have? */ 2344 /* If we've transmitted all packets, don't carry over the 2345 * largest packet size. 2346 */ 2347 if (packets_remaining == 0) { 2348 packet_size = 0; 2349 overhead = 0; 2350 } else if (packets_transmitted > 0) { 2351 /* Otherwise if we do have remaining packets, and we've 2352 * scheduled some packets in this interval, take the 2353 * largest max packet size from endpoints with this 2354 * interval. 2355 */ 2356 packet_size = largest_mps; 2357 overhead = interval_overhead; 2358 } 2359 /* Otherwise carry over packet_size and overhead from the last 2360 * time we had a remainder. 2361 */ 2362 bw_used += bw_added; 2363 if (bw_used > max_bandwidth) { 2364 xhci_warn(xhci, "Not enough bandwidth. 
" 2365 "Proposed: %u, Max: %u\n", 2366 bw_used, max_bandwidth); 2367 return -ENOMEM; 2368 } 2369 } 2370 /* 2371 * Ok, we know we have some packets left over after even-handedly 2372 * scheduling interval 15. We don't know which microframes they will 2373 * fit into, so we over-schedule and say they will be scheduled every 2374 * microframe. 2375 */ 2376 if (packets_remaining > 0) 2377 bw_used += overhead + packet_size; 2378 2379 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2380 unsigned int port_index = virt_dev->real_port - 1; 2381 2382 /* OK, we're manipulating a HS device attached to a 2383 * root port bandwidth domain. Include the number of active TTs 2384 * in the bandwidth used. 2385 */ 2386 bw_used += TT_HS_OVERHEAD * 2387 xhci->rh_bw[port_index].num_active_tts; 2388 } 2389 2390 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2391 "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2392 "Available: %u " "percent", 2393 bw_used, max_bandwidth, bw_reserved, 2394 (max_bandwidth - bw_used - bw_reserved) * 100 / 2395 max_bandwidth); 2396 2397 bw_used += bw_reserved; 2398 if (bw_used > max_bandwidth) { 2399 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2400 bw_used, max_bandwidth); 2401 return -ENOMEM; 2402 } 2403 2404 bw_table->bw_used = bw_used; 2405 return 0; 2406 } 2407 2408 static bool xhci_is_async_ep(unsigned int ep_type) 2409 { 2410 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2411 ep_type != ISOC_IN_EP && 2412 ep_type != INT_IN_EP); 2413 } 2414 2415 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2416 { 2417 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2418 } 2419 2420 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2421 { 2422 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2423 2424 if (ep_bw->ep_interval == 0) 2425 return SS_OVERHEAD_BURST + 2426 (ep_bw->mult * ep_bw->num_packets * 2427 (SS_OVERHEAD + mps)); 2428 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2429 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2430 1 << ep_bw->ep_interval); 2431 2432 } 2433 2434 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2435 struct xhci_bw_info *ep_bw, 2436 struct xhci_interval_bw_table *bw_table, 2437 struct usb_device *udev, 2438 struct xhci_virt_ep *virt_ep, 2439 struct xhci_tt_bw_info *tt_info) 2440 { 2441 struct xhci_interval_bw *interval_bw; 2442 int normalized_interval; 2443 2444 if (xhci_is_async_ep(ep_bw->type)) 2445 return; 2446 2447 if (udev->speed >= USB_SPEED_SUPER) { 2448 if (xhci_is_sync_in_ep(ep_bw->type)) 2449 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2450 xhci_get_ss_bw_consumed(ep_bw); 2451 else 2452 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2453 xhci_get_ss_bw_consumed(ep_bw); 2454 return; 2455 } 2456 2457 /* SuperSpeed endpoints never get added to intervals in the table, so 2458 * this check is only valid for HS/FS/LS devices. 2459 */ 2460 if (list_empty(&virt_ep->bw_endpoint_list)) 2461 return; 2462 /* For LS/FS devices, we need to translate the interval expressed in 2463 * microframes to frames. 
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}

static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	/* Match xhci_drop_ep_from_interval_table(): SuperSpeedPlus endpoints
	 * must take the SuperSpeed accounting path as well, or the add and
	 * drop bookkeeping would disagree.
	 */
	if (udev->speed >= USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list.
*/ 2568 list_add_tail(&virt_ep->bw_endpoint_list, 2569 &interval_bw->endpoints); 2570 } 2571 2572 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2573 struct xhci_virt_device *virt_dev, 2574 int old_active_eps) 2575 { 2576 struct xhci_root_port_bw_info *rh_bw_info; 2577 if (!virt_dev->tt_info) 2578 return; 2579 2580 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2581 if (old_active_eps == 0 && 2582 virt_dev->tt_info->active_eps != 0) { 2583 rh_bw_info->num_active_tts += 1; 2584 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2585 } else if (old_active_eps != 0 && 2586 virt_dev->tt_info->active_eps == 0) { 2587 rh_bw_info->num_active_tts -= 1; 2588 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2589 } 2590 } 2591 2592 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2593 struct xhci_virt_device *virt_dev, 2594 struct xhci_container_ctx *in_ctx) 2595 { 2596 struct xhci_bw_info ep_bw_info[31]; 2597 int i; 2598 struct xhci_input_control_ctx *ctrl_ctx; 2599 int old_active_eps = 0; 2600 2601 if (virt_dev->tt_info) 2602 old_active_eps = virt_dev->tt_info->active_eps; 2603 2604 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2605 if (!ctrl_ctx) { 2606 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2607 __func__); 2608 return -ENOMEM; 2609 } 2610 2611 for (i = 0; i < 31; i++) { 2612 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2613 continue; 2614 2615 /* Make a copy of the BW info in case we need to revert this */ 2616 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2617 sizeof(ep_bw_info[i])); 2618 /* Drop the endpoint from the interval table if the endpoint is 2619 * being dropped or changed. 2620 */ 2621 if (EP_IS_DROPPED(ctrl_ctx, i)) 2622 xhci_drop_ep_from_interval_table(xhci, 2623 &virt_dev->eps[i].bw_info, 2624 virt_dev->bw_table, 2625 virt_dev->udev, 2626 &virt_dev->eps[i], 2627 virt_dev->tt_info); 2628 } 2629 /* Overwrite the information stored in the endpoints' bw_info */ 2630 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2631 for (i = 0; i < 31; i++) { 2632 /* Add any changed or added endpoints to the interval table */ 2633 if (EP_IS_ADDED(ctrl_ctx, i)) 2634 xhci_add_ep_to_interval_table(xhci, 2635 &virt_dev->eps[i].bw_info, 2636 virt_dev->bw_table, 2637 virt_dev->udev, 2638 &virt_dev->eps[i], 2639 virt_dev->tt_info); 2640 } 2641 2642 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2643 /* Ok, this fits in the bandwidth we have. 2644 * Update the number of active TTs. 2645 */ 2646 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2647 return 0; 2648 } 2649 2650 /* We don't have enough bandwidth for this, revert the stored info. */ 2651 for (i = 0; i < 31; i++) { 2652 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2653 continue; 2654 2655 /* Drop the new copies of any added or changed endpoints from 2656 * the interval table. 
2657 */ 2658 if (EP_IS_ADDED(ctrl_ctx, i)) { 2659 xhci_drop_ep_from_interval_table(xhci, 2660 &virt_dev->eps[i].bw_info, 2661 virt_dev->bw_table, 2662 virt_dev->udev, 2663 &virt_dev->eps[i], 2664 virt_dev->tt_info); 2665 } 2666 /* Revert the endpoint back to its old information */ 2667 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2668 sizeof(ep_bw_info[i])); 2669 /* Add any changed or dropped endpoints back into the table */ 2670 if (EP_IS_DROPPED(ctrl_ctx, i)) 2671 xhci_add_ep_to_interval_table(xhci, 2672 &virt_dev->eps[i].bw_info, 2673 virt_dev->bw_table, 2674 virt_dev->udev, 2675 &virt_dev->eps[i], 2676 virt_dev->tt_info); 2677 } 2678 return -ENOMEM; 2679 } 2680 2681 2682 /* Issue a configure endpoint command or evaluate context command 2683 * and wait for it to finish. 2684 */ 2685 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2686 struct usb_device *udev, 2687 struct xhci_command *command, 2688 bool ctx_change, bool must_succeed) 2689 { 2690 int ret; 2691 unsigned long flags; 2692 struct xhci_input_control_ctx *ctrl_ctx; 2693 struct xhci_virt_device *virt_dev; 2694 struct xhci_slot_ctx *slot_ctx; 2695 2696 if (!command) 2697 return -EINVAL; 2698 2699 spin_lock_irqsave(&xhci->lock, flags); 2700 2701 if (xhci->xhc_state & XHCI_STATE_DYING) { 2702 spin_unlock_irqrestore(&xhci->lock, flags); 2703 return -ESHUTDOWN; 2704 } 2705 2706 virt_dev = xhci->devs[udev->slot_id]; 2707 2708 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 2709 if (!ctrl_ctx) { 2710 spin_unlock_irqrestore(&xhci->lock, flags); 2711 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2712 __func__); 2713 return -ENOMEM; 2714 } 2715 2716 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2717 xhci_reserve_host_resources(xhci, ctrl_ctx)) { 2718 spin_unlock_irqrestore(&xhci->lock, flags); 2719 xhci_warn(xhci, "Not enough host resources, " 2720 "active endpoint contexts = %u\n", 2721 xhci->num_active_eps); 2722 return -ENOMEM; 2723 } 2724 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2725 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { 2726 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2727 xhci_free_host_resources(xhci, ctrl_ctx); 2728 spin_unlock_irqrestore(&xhci->lock, flags); 2729 xhci_warn(xhci, "Not enough bandwidth\n"); 2730 return -ENOMEM; 2731 } 2732 2733 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 2734 trace_xhci_configure_endpoint(slot_ctx); 2735 2736 if (!ctx_change) 2737 ret = xhci_queue_configure_endpoint(xhci, command, 2738 command->in_ctx->dma, 2739 udev->slot_id, must_succeed); 2740 else 2741 ret = xhci_queue_evaluate_context(xhci, command, 2742 command->in_ctx->dma, 2743 udev->slot_id, must_succeed); 2744 if (ret < 0) { 2745 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2746 xhci_free_host_resources(xhci, ctrl_ctx); 2747 spin_unlock_irqrestore(&xhci->lock, flags); 2748 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2749 "FIXME allocate a new ring segment"); 2750 return -ENOMEM; 2751 } 2752 xhci_ring_cmd_db(xhci); 2753 spin_unlock_irqrestore(&xhci->lock, flags); 2754 2755 /* Wait for the configure endpoint command to complete */ 2756 wait_for_completion(command->completion); 2757 2758 if (!ctx_change) 2759 ret = xhci_configure_endpoint_result(xhci, udev, 2760 &command->status); 2761 else 2762 ret = xhci_evaluate_context_result(xhci, udev, 2763 &command->status); 2764 2765 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2766 spin_lock_irqsave(&xhci->lock, flags); 2767 /* If the command failed, remove the reserved resources. 
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, ctrl_ctx);
		else
			xhci_finish_resource_reservation(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
		struct xhci_virt_device *vdev, int i)
{
	struct xhci_virt_ep *ep = &vdev->eps[i];

	if (ep->ep_state & EP_HAS_STREAMS) {
		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
				xhci_get_endpoint_address(i));
		xhci_free_stream_info(xhci, ep->stream_info);
		ep->stream_info = NULL;
		ep->ep_state &= ~EP_HAS_STREAMS;
	}
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint(). If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface. Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_command *command;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_REMOVING))
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	command->in_ctx = virt_dev->in_ctx;

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -ENOMEM;
		goto command_cleanup;
	}
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
	    ctrl_ctx->drop_flags == 0) {
		ret = 0;
		goto command_cleanup;
	}
	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	for (i = 31; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
		    || (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ret = xhci_configure_endpoint(xhci, udev, command,
			false, false);
	if (ret)
		/* Caller should call reset_bandwidth() */
		goto command_cleanup;

	/* Free any rings that were dropped, but not changed.
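	 * (Endpoint index i corresponds to bit i + 1 in add_flags/drop_flags,
	 * since bit 0 is the slot context flag and bit 1 is EP0; hence the
	 * 1 << (i + 1) tests below.)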
*/ 2868 for (i = 1; i < 31; i++) { 2869 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 2870 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { 2871 xhci_free_endpoint_ring(xhci, virt_dev, i); 2872 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 2873 } 2874 } 2875 xhci_zero_in_ctx(xhci, virt_dev); 2876 /* 2877 * Install any rings for completely new endpoints or changed endpoints, 2878 * and free any old rings from changed endpoints. 2879 */ 2880 for (i = 1; i < 31; i++) { 2881 if (!virt_dev->eps[i].new_ring) 2882 continue; 2883 /* Only free the old ring if it exists. 2884 * It may not if this is the first add of an endpoint. 2885 */ 2886 if (virt_dev->eps[i].ring) { 2887 xhci_free_endpoint_ring(xhci, virt_dev, i); 2888 } 2889 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 2890 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 2891 virt_dev->eps[i].new_ring = NULL; 2892 } 2893 command_cleanup: 2894 kfree(command->completion); 2895 kfree(command); 2896 2897 return ret; 2898 } 2899 2900 static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2901 { 2902 struct xhci_hcd *xhci; 2903 struct xhci_virt_device *virt_dev; 2904 int i, ret; 2905 2906 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2907 if (ret <= 0) 2908 return; 2909 xhci = hcd_to_xhci(hcd); 2910 2911 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2912 virt_dev = xhci->devs[udev->slot_id]; 2913 /* Free any rings allocated for added endpoints */ 2914 for (i = 0; i < 31; i++) { 2915 if (virt_dev->eps[i].new_ring) { 2916 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 2917 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 2918 virt_dev->eps[i].new_ring = NULL; 2919 } 2920 } 2921 xhci_zero_in_ctx(xhci, virt_dev); 2922 } 2923 2924 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 2925 struct xhci_container_ctx *in_ctx, 2926 struct xhci_container_ctx *out_ctx, 2927 struct xhci_input_control_ctx *ctrl_ctx, 2928 u32 add_flags, u32 drop_flags) 2929 { 2930 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 2931 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 2932 xhci_slot_copy(xhci, in_ctx, out_ctx); 2933 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2934 } 2935 2936 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 2937 unsigned int slot_id, unsigned int ep_index, 2938 struct xhci_dequeue_state *deq_state) 2939 { 2940 struct xhci_input_control_ctx *ctrl_ctx; 2941 struct xhci_container_ctx *in_ctx; 2942 struct xhci_ep_ctx *ep_ctx; 2943 u32 added_ctxs; 2944 dma_addr_t addr; 2945 2946 in_ctx = xhci->devs[slot_id]->in_ctx; 2947 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2948 if (!ctrl_ctx) { 2949 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2950 __func__); 2951 return; 2952 } 2953 2954 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 2955 xhci->devs[slot_id]->out_ctx, ep_index); 2956 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 2957 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 2958 deq_state->new_deq_ptr); 2959 if (addr == 0) { 2960 xhci_warn(xhci, "WARN Cannot submit config ep after " 2961 "reset ep command\n"); 2962 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 2963 deq_state->new_deq_seg, 2964 deq_state->new_deq_ptr); 2965 return; 2966 } 2967 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 2968 2969 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 2970 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 2971 xhci->devs[slot_id]->out_ctx, ctrl_ctx, 2972 
			added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
			       unsigned int stream_id, struct xhci_td *td)
{
	struct xhci_dequeue_state deq_state;
	struct usb_device *udev = td->urb->dev;

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Cleaning up stalled endpoint ring");
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, stream_id, td, &deq_state);

	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
		return;

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Queueing new dequeue state");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and
		 * the reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Setting up input context for "
				"configure endpoint command");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/*
 * Called after usb core issues a clear halt control message.
 * The host side of the halt should already be cleared by a reset endpoint
 * command issued when the STALL event was received.
 *
 * The reset endpoint command may only be issued to endpoints in the halted
 * state. For software that wishes to reset the data toggle or sequence number
 * of an endpoint that isn't in the halted state, this function will issue a
 * configure endpoint command with the Drop and Add bits set for the target
 * endpoint. Refer to the additional note in xhci specification section 4.6.8.
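 *
 * In outline (a summary of the code below, not additional behavior): allocate
 * a stop endpoint command and a configure endpoint command, refuse the reset
 * if the ring still has TDs queued, stop the endpoint so the xHC flushes its
 * state to the output context, then issue a configure endpoint command whose
 * input context drops and re-adds this endpoint to reset the toggle/sequence
 * number.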
 */

static void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *host_ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	struct xhci_virt_device *vdev;
	struct xhci_virt_ep *ep;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_command *stop_cmd, *cfg_cmd;
	unsigned int ep_index;
	unsigned long flags;
	u32 ep_flag;

	xhci = hcd_to_xhci(hcd);
	if (!host_ep->hcpriv)
		return;
	udev = (struct usb_device *) host_ep->hcpriv;
	vdev = xhci->devs[udev->slot_id];
	ep_index = xhci_get_endpoint_index(&host_ep->desc);
	ep = &vdev->eps[ep_index];

	/* Bail out if toggle is already being cleared by an endpoint reset */
	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
		return;
	}
	/* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4 */
	if (usb_endpoint_xfer_control(&host_ep->desc) ||
	    usb_endpoint_xfer_isoc(&host_ep->desc))
		return;

	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);

	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
		return;

	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
	if (!stop_cmd)
		return;

	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
	if (!cfg_cmd)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);

	/* block queuing new trbs and ringing ep doorbell */
	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;

	/*
	 * Make sure the endpoint ring is empty before resetting the toggle/seq.
	 * The driver is required to synchronously cancel all transfer requests.
	 * Stop the endpoint to force the xHC to update the output context.
	 */

	if (!list_empty(&ep->ring->td_list)) {
		dev_err(&udev->dev, "EP not empty, refuse reset\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		goto cleanup;
	}
	xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(stop_cmd->completion);

	spin_lock_irqsave(&xhci->lock, flags);

	/* config ep command clears toggle if add and drop ep flags are set */
	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
			ctrl_ctx, ep_flag, ep_flag);
	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);

	xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
			udev->slot_id, false);
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(cfg_cmd->completion);

	ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
	xhci_free_command(xhci, cfg_cmd);
cleanup:
	xhci_free_command(xhci, stop_cmd);
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
	    ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports. Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}

/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint?
*/ 3230 if (ep_state & EP_GETTING_NO_STREAMS) { 3231 xhci_warn(xhci, "WARN Can't disable streams for " 3232 "endpoint 0x%x, " 3233 "streams are being disabled already\n", 3234 eps[i]->desc.bEndpointAddress); 3235 return 0; 3236 } 3237 /* Are there actually any streams to free? */ 3238 if (!(ep_state & EP_HAS_STREAMS) && 3239 !(ep_state & EP_GETTING_STREAMS)) { 3240 xhci_warn(xhci, "WARN Can't disable streams for " 3241 "endpoint 0x%x, " 3242 "streams are already disabled!\n", 3243 eps[i]->desc.bEndpointAddress); 3244 xhci_warn(xhci, "WARN xhci_free_streams() called " 3245 "with non-streams endpoint\n"); 3246 return 0; 3247 } 3248 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); 3249 } 3250 return changed_ep_bitmask; 3251 } 3252 3253 /* 3254 * The USB device drivers use this function (through the HCD interface in USB 3255 * core) to prepare a set of bulk endpoints to use streams. Streams are used to 3256 * coordinate mass storage command queueing across multiple endpoints (basically 3257 * a stream ID == a task ID). 3258 * 3259 * Setting up streams involves allocating the same size stream context array 3260 * for each endpoint and issuing a configure endpoint command for all endpoints. 3261 * 3262 * Don't allow the call to succeed if one endpoint only supports one stream 3263 * (which means it doesn't support streams at all). 3264 * 3265 * Drivers may get less stream IDs than they asked for, if the host controller 3266 * hardware or endpoints claim they can't support the number of requested 3267 * stream IDs. 3268 */ 3269 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 3270 struct usb_host_endpoint **eps, unsigned int num_eps, 3271 unsigned int num_streams, gfp_t mem_flags) 3272 { 3273 int i, ret; 3274 struct xhci_hcd *xhci; 3275 struct xhci_virt_device *vdev; 3276 struct xhci_command *config_cmd; 3277 struct xhci_input_control_ctx *ctrl_ctx; 3278 unsigned int ep_index; 3279 unsigned int num_stream_ctxs; 3280 unsigned int max_packet; 3281 unsigned long flags; 3282 u32 changed_ep_bitmask = 0; 3283 3284 if (!eps) 3285 return -EINVAL; 3286 3287 /* Add one to the number of streams requested to account for 3288 * stream 0 that is reserved for xHCI usage. 3289 */ 3290 num_streams += 1; 3291 xhci = hcd_to_xhci(hcd); 3292 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 3293 num_streams); 3294 3295 /* MaxPSASize value 0 (2 streams) means streams are not supported */ 3296 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || 3297 HCC_MAX_PSA(xhci->hcc_params) < 4) { 3298 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); 3299 return -ENOSYS; 3300 } 3301 3302 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); 3303 if (!config_cmd) 3304 return -ENOMEM; 3305 3306 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 3307 if (!ctrl_ctx) { 3308 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3309 __func__); 3310 xhci_free_command(xhci, config_cmd); 3311 return -ENOMEM; 3312 } 3313 3314 /* Check to make sure all endpoints are not already configured for 3315 * streams. While we're at it, find the maximum number of streams that 3316 * all the endpoints will support and check for duplicate endpoints. 
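 *
 * (Note: num_streams at this point still includes stream 0, which the
 * driver can't use. xhci_calculate_streams_entries() below then rounds the
 * request up to a power-of-two context array and clamps it to HCC_MAX_PSA;
 * e.g. a request for 16 usable streams means 17 IDs and thus a 32-entry
 * stream context array, hardware permitting.)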
3317 */ 3318 spin_lock_irqsave(&xhci->lock, flags); 3319 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 3320 num_eps, &num_streams, &changed_ep_bitmask); 3321 if (ret < 0) { 3322 xhci_free_command(xhci, config_cmd); 3323 spin_unlock_irqrestore(&xhci->lock, flags); 3324 return ret; 3325 } 3326 if (num_streams <= 1) { 3327 xhci_warn(xhci, "WARN: endpoints can't handle " 3328 "more than one stream.\n"); 3329 xhci_free_command(xhci, config_cmd); 3330 spin_unlock_irqrestore(&xhci->lock, flags); 3331 return -EINVAL; 3332 } 3333 vdev = xhci->devs[udev->slot_id]; 3334 /* Mark each endpoint as being in transition, so 3335 * xhci_urb_enqueue() will reject all URBs. 3336 */ 3337 for (i = 0; i < num_eps; i++) { 3338 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3339 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 3340 } 3341 spin_unlock_irqrestore(&xhci->lock, flags); 3342 3343 /* Setup internal data structures and allocate HW data structures for 3344 * streams (but don't install the HW structures in the input context 3345 * until we're sure all memory allocation succeeded). 3346 */ 3347 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 3348 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 3349 num_stream_ctxs, num_streams); 3350 3351 for (i = 0; i < num_eps; i++) { 3352 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3353 max_packet = usb_endpoint_maxp(&eps[i]->desc); 3354 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 3355 num_stream_ctxs, 3356 num_streams, 3357 max_packet, mem_flags); 3358 if (!vdev->eps[ep_index].stream_info) 3359 goto cleanup; 3360 /* Set maxPstreams in endpoint context and update deq ptr to 3361 * point to stream context array. FIXME 3362 */ 3363 } 3364 3365 /* Set up the input context for a configure endpoint command. */ 3366 for (i = 0; i < num_eps; i++) { 3367 struct xhci_ep_ctx *ep_ctx; 3368 3369 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3370 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 3371 3372 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 3373 vdev->out_ctx, ep_index); 3374 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 3375 vdev->eps[ep_index].stream_info); 3376 } 3377 /* Tell the HW to drop its old copy of the endpoint context info 3378 * and add the updated copy from the input context. 3379 */ 3380 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 3381 vdev->out_ctx, ctrl_ctx, 3382 changed_ep_bitmask, changed_ep_bitmask); 3383 3384 /* Issue and wait for the configure endpoint command */ 3385 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 3386 false, false); 3387 3388 /* xHC rejected the configure endpoint command for some reason, so we 3389 * leave the old ring intact and free our internal streams data 3390 * structure. 3391 */ 3392 if (ret < 0) 3393 goto cleanup; 3394 3395 spin_lock_irqsave(&xhci->lock, flags); 3396 for (i = 0; i < num_eps; i++) { 3397 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3398 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3399 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 3400 udev->slot_id, ep_index); 3401 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 3402 } 3403 xhci_free_command(xhci, config_cmd); 3404 spin_unlock_irqrestore(&xhci->lock, flags); 3405 3406 /* Subtract 1 for stream 0, which drivers can't use */ 3407 return num_streams - 1; 3408 3409 cleanup: 3410 /* If it didn't work, free the streams! 
*/
3411 for (i = 0; i < num_eps; i++) {
3412 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3413 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3414 vdev->eps[ep_index].stream_info = NULL;
3415 /* FIXME Unset maxPstreams in endpoint context and
3416 * update deq ptr to point to normal stream ring.
3417 */
3418 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3419 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3420 xhci_endpoint_zero(xhci, vdev, eps[i]);
3421 }
3422 xhci_free_command(xhci, config_cmd);
3423 return -ENOMEM;
3424 }
3425 
3426 /* Transition the endpoint from using streams to being a "normal" endpoint
3427 * without streams.
3428 *
3429 * Modify the endpoint context state, submit a configure endpoint command,
3430 * and free all endpoint rings for streams if that completes successfully.
3431 */
3432 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3433 struct usb_host_endpoint **eps, unsigned int num_eps,
3434 gfp_t mem_flags)
3435 {
3436 int i, ret;
3437 struct xhci_hcd *xhci;
3438 struct xhci_virt_device *vdev;
3439 struct xhci_command *command;
3440 struct xhci_input_control_ctx *ctrl_ctx;
3441 unsigned int ep_index;
3442 unsigned long flags;
3443 u32 changed_ep_bitmask;
3444 
3445 xhci = hcd_to_xhci(hcd);
3446 vdev = xhci->devs[udev->slot_id];
3447 
3448 /* Set up a configure endpoint command to remove the stream rings */
3449 spin_lock_irqsave(&xhci->lock, flags);
3450 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3451 udev, eps, num_eps);
3452 if (changed_ep_bitmask == 0) {
3453 spin_unlock_irqrestore(&xhci->lock, flags);
3454 return -EINVAL;
3455 }
3456 
3457 /* Use the xhci_command structure from the first endpoint. We may have
3458 * allocated too many, but the driver may call xhci_free_streams() for
3459 * each endpoint it grouped into one call to xhci_alloc_streams().
3460 */
3461 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3462 command = vdev->eps[ep_index].stream_info->free_streams_command;
3463 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3464 if (!ctrl_ctx) {
3465 spin_unlock_irqrestore(&xhci->lock, flags);
3466 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3467 __func__);
3468 return -EINVAL;
3469 }
3470 
3471 for (i = 0; i < num_eps; i++) {
3472 struct xhci_ep_ctx *ep_ctx;
3473 
3474 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3475 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3476 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3477 EP_GETTING_NO_STREAMS;
3478 
3479 xhci_endpoint_copy(xhci, command->in_ctx,
3480 vdev->out_ctx, ep_index);
3481 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3482 &vdev->eps[ep_index]);
3483 }
3484 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3485 vdev->out_ctx, ctrl_ctx,
3486 changed_ep_bitmask, changed_ep_bitmask);
3487 spin_unlock_irqrestore(&xhci->lock, flags);
3488 
3489 /* Issue and wait for the configure endpoint command,
3490 * which must succeed.
3491 */
3492 ret = xhci_configure_endpoint(xhci, udev, command,
3493 false, true);
3494 
3495 /* xHC rejected the configure endpoint command for some reason, so we
3496 * leave the stream rings intact.
3497 */
3498 if (ret < 0)
3499 return ret;
3500 
3501 spin_lock_irqsave(&xhci->lock, flags);
3502 for (i = 0; i < num_eps; i++) {
3503 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3504 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3505 vdev->eps[ep_index].stream_info = NULL;
3506 /* FIXME Unset maxPstreams in endpoint context and
3507 * update deq ptr to point to normal stream ring.
3508 */
3509 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3510 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3511 }
3512 spin_unlock_irqrestore(&xhci->lock, flags);
3513 
3514 return 0;
3515 }
3516 
3517 /*
3518 * Deletes endpoint resources for endpoints that were active before a Reset
3519 * Device command, or a Disable Slot command. The Reset Device command leaves
3520 * the control endpoint intact, whereas the Disable Slot command deletes it.
3521 *
3522 * Must be called with xhci->lock held.
3523 */
3524 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3525 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3526 {
3527 int i;
3528 unsigned int num_dropped_eps = 0;
3529 unsigned int drop_flags = 0;
3530 
3531 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3532 if (virt_dev->eps[i].ring) {
3533 drop_flags |= 1 << i;
3534 num_dropped_eps++;
3535 }
3536 }
3537 xhci->num_active_eps -= num_dropped_eps;
3538 if (num_dropped_eps)
3539 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3540 "Dropped %u ep ctxs, flags = 0x%x, "
3541 "%u now active.",
3542 num_dropped_eps, drop_flags,
3543 xhci->num_active_eps);
3544 }
3545 
3546 /*
3547 * This submits a Reset Device Command, which will set the device state to 0,
3548 * set the device address to 0, and disable all the endpoints except the default
3549 * control endpoint. The USB core should come back and call
3550 * xhci_address_device(), and then re-set up the configuration. If this is
3551 * called because of a usb_reset_and_verify_device(), then the old alternate
3552 * settings will be re-installed through the normal bandwidth allocation
3553 * functions.
3554 *
3555 * Wait for the Reset Device command to finish. Remove all structures
3556 * associated with the endpoints that were disabled. Clear the input device
3557 * structure? Reset the control endpoint 0 max packet size?
3558 *
3559 * If the virt_dev to be reset does not exist or does not match the udev,
3560 * it means the device is lost, possibly due to the xHC restore error and
3561 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3562 * re-allocate the device.
3563 */
3564 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3565 struct usb_device *udev)
3566 {
3567 int ret, i;
3568 unsigned long flags;
3569 struct xhci_hcd *xhci;
3570 unsigned int slot_id;
3571 struct xhci_virt_device *virt_dev;
3572 struct xhci_command *reset_device_cmd;
3573 struct xhci_slot_ctx *slot_ctx;
3574 int old_active_eps = 0;
3575 
3576 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3577 if (ret <= 0)
3578 return ret;
3579 xhci = hcd_to_xhci(hcd);
3580 slot_id = udev->slot_id;
3581 virt_dev = xhci->devs[slot_id];
3582 if (!virt_dev) {
3583 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3584 "not exist. Re-allocate the device\n", slot_id);
3585 ret = xhci_alloc_dev(hcd, udev);
3586 if (ret == 1)
3587 return 0;
3588 else
3589 return -EINVAL;
3590 }
3591 
3592 if (virt_dev->tt_info)
3593 old_active_eps = virt_dev->tt_info->active_eps;
3594 
3595 if (virt_dev->udev != udev) {
3596 /* If the virt_dev and the udev do not match, this virt_dev
3597 * may belong to another udev.
3598 * Re-allocate the device.
3599 */
3600 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3601 "not match the udev. Re-allocate the device\n",
3602 slot_id);
3603 ret = xhci_alloc_dev(hcd, udev);
3604 if (ret == 1)
3605 return 0;
3606 else
3607 return -EINVAL;
3608 }
3609 
3610 /* If device is not setup, there is no point in resetting it */
3611 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3612 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3613 SLOT_STATE_DISABLED)
3614 return 0;
3615 
3616 trace_xhci_discover_or_reset_device(slot_ctx);
3617 
3618 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3619 /* Allocate the command structure that holds the struct completion.
3620 * Assume we're in process context, since the normal device reset
3621 * process has to wait for the device anyway. Storage devices are
3622 * reset as part of error handling, so use GFP_NOIO instead of
3623 * GFP_KERNEL.
3624 */
3625 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3626 if (!reset_device_cmd) {
3627 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3628 return -ENOMEM;
3629 }
3630 
3631 /* Attempt to submit the Reset Device command to the command ring */
3632 spin_lock_irqsave(&xhci->lock, flags);
3633 
3634 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3635 if (ret) {
3636 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3637 spin_unlock_irqrestore(&xhci->lock, flags);
3638 goto command_cleanup;
3639 }
3640 xhci_ring_cmd_db(xhci);
3641 spin_unlock_irqrestore(&xhci->lock, flags);
3642 
3643 /* Wait for the Reset Device command to finish */
3644 wait_for_completion(reset_device_cmd->completion);
3645 
3646 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3647 * unless we tried to reset a slot ID that wasn't enabled,
3648 * or the device wasn't in the addressed or configured state.
3649 */
3650 ret = reset_device_cmd->status;
3651 switch (ret) {
3652 case COMP_COMMAND_ABORTED:
3653 case COMP_COMMAND_RING_STOPPED:
3654 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3655 ret = -ETIME;
3656 goto command_cleanup;
3657 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3658 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3659 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3660 slot_id,
3661 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3662 xhci_dbg(xhci, "Not freeing device rings.\n");
3663 /* Don't treat this as an error. May change my mind later.
*/ 3664 ret = 0; 3665 goto command_cleanup; 3666 case COMP_SUCCESS: 3667 xhci_dbg(xhci, "Successful reset device command.\n"); 3668 break; 3669 default: 3670 if (xhci_is_vendor_info_code(xhci, ret)) 3671 break; 3672 xhci_warn(xhci, "Unknown completion code %u for " 3673 "reset device command.\n", ret); 3674 ret = -EINVAL; 3675 goto command_cleanup; 3676 } 3677 3678 /* Free up host controller endpoint resources */ 3679 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3680 spin_lock_irqsave(&xhci->lock, flags); 3681 /* Don't delete the default control endpoint resources */ 3682 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3683 spin_unlock_irqrestore(&xhci->lock, flags); 3684 } 3685 3686 /* Everything but endpoint 0 is disabled, so free the rings. */ 3687 for (i = 1; i < 31; i++) { 3688 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3689 3690 if (ep->ep_state & EP_HAS_STREAMS) { 3691 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", 3692 xhci_get_endpoint_address(i)); 3693 xhci_free_stream_info(xhci, ep->stream_info); 3694 ep->stream_info = NULL; 3695 ep->ep_state &= ~EP_HAS_STREAMS; 3696 } 3697 3698 if (ep->ring) { 3699 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 3700 xhci_free_endpoint_ring(xhci, virt_dev, i); 3701 } 3702 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3703 xhci_drop_ep_from_interval_table(xhci, 3704 &virt_dev->eps[i].bw_info, 3705 virt_dev->bw_table, 3706 udev, 3707 &virt_dev->eps[i], 3708 virt_dev->tt_info); 3709 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3710 } 3711 /* If necessary, update the number of active TTs on this root port */ 3712 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3713 ret = 0; 3714 3715 command_cleanup: 3716 xhci_free_command(xhci, reset_device_cmd); 3717 return ret; 3718 } 3719 3720 /* 3721 * At this point, the struct usb_device is about to go away, the device has 3722 * disconnected, and all traffic has been stopped and the endpoints have been 3723 * disabled. Free any HC data structures associated with that device. 3724 */ 3725 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3726 { 3727 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3728 struct xhci_virt_device *virt_dev; 3729 struct xhci_slot_ctx *slot_ctx; 3730 int i, ret; 3731 3732 #ifndef CONFIG_USB_DEFAULT_PERSIST 3733 /* 3734 * We called pm_runtime_get_noresume when the device was attached. 3735 * Decrement the counter here to allow controller to runtime suspend 3736 * if no devices remain. 3737 */ 3738 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3739 pm_runtime_put_noidle(hcd->self.controller); 3740 #endif 3741 3742 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3743 /* If the host is halted due to driver unload, we still need to free the 3744 * device. 
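 * (xhci_check_args() returns -ENODEV in exactly that case, so -ENODEV is
 * deliberately tolerated below.)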
3745 */ 3746 if (ret <= 0 && ret != -ENODEV) 3747 return; 3748 3749 virt_dev = xhci->devs[udev->slot_id]; 3750 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3751 trace_xhci_free_dev(slot_ctx); 3752 3753 /* Stop any wayward timer functions (which may grab the lock) */ 3754 for (i = 0; i < 31; i++) { 3755 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; 3756 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3757 } 3758 xhci_debugfs_remove_slot(xhci, udev->slot_id); 3759 virt_dev->udev = NULL; 3760 ret = xhci_disable_slot(xhci, udev->slot_id); 3761 if (ret) 3762 xhci_free_virt_device(xhci, udev->slot_id); 3763 } 3764 3765 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) 3766 { 3767 struct xhci_command *command; 3768 unsigned long flags; 3769 u32 state; 3770 int ret = 0; 3771 3772 command = xhci_alloc_command(xhci, false, GFP_KERNEL); 3773 if (!command) 3774 return -ENOMEM; 3775 3776 spin_lock_irqsave(&xhci->lock, flags); 3777 /* Don't disable the slot if the host controller is dead. */ 3778 state = readl(&xhci->op_regs->status); 3779 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3780 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3781 spin_unlock_irqrestore(&xhci->lock, flags); 3782 kfree(command); 3783 return -ENODEV; 3784 } 3785 3786 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 3787 slot_id); 3788 if (ret) { 3789 spin_unlock_irqrestore(&xhci->lock, flags); 3790 kfree(command); 3791 return ret; 3792 } 3793 xhci_ring_cmd_db(xhci); 3794 spin_unlock_irqrestore(&xhci->lock, flags); 3795 return ret; 3796 } 3797 3798 /* 3799 * Checks if we have enough host controller resources for the default control 3800 * endpoint. 3801 * 3802 * Must be called with xhci->lock held. 3803 */ 3804 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3805 { 3806 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3807 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3808 "Not enough ep ctxs: " 3809 "%u active, need to add 1, limit is %u.", 3810 xhci->num_active_eps, xhci->limit_active_eps); 3811 return -ENOMEM; 3812 } 3813 xhci->num_active_eps += 1; 3814 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3815 "Adding 1 ep ctx, %u now active.", 3816 xhci->num_active_eps); 3817 return 0; 3818 } 3819 3820 3821 /* 3822 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3823 * timed out, or allocating memory failed. Returns 1 on success. 
3824 */ 3825 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3826 { 3827 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3828 struct xhci_virt_device *vdev; 3829 struct xhci_slot_ctx *slot_ctx; 3830 unsigned long flags; 3831 int ret, slot_id; 3832 struct xhci_command *command; 3833 3834 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 3835 if (!command) 3836 return 0; 3837 3838 spin_lock_irqsave(&xhci->lock, flags); 3839 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 3840 if (ret) { 3841 spin_unlock_irqrestore(&xhci->lock, flags); 3842 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3843 xhci_free_command(xhci, command); 3844 return 0; 3845 } 3846 xhci_ring_cmd_db(xhci); 3847 spin_unlock_irqrestore(&xhci->lock, flags); 3848 3849 wait_for_completion(command->completion); 3850 slot_id = command->slot_id; 3851 3852 if (!slot_id || command->status != COMP_SUCCESS) { 3853 xhci_err(xhci, "Error while assigning device slot ID\n"); 3854 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 3855 HCS_MAX_SLOTS( 3856 readl(&xhci->cap_regs->hcs_params1))); 3857 xhci_free_command(xhci, command); 3858 return 0; 3859 } 3860 3861 xhci_free_command(xhci, command); 3862 3863 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3864 spin_lock_irqsave(&xhci->lock, flags); 3865 ret = xhci_reserve_host_control_ep_resources(xhci); 3866 if (ret) { 3867 spin_unlock_irqrestore(&xhci->lock, flags); 3868 xhci_warn(xhci, "Not enough host resources, " 3869 "active endpoint contexts = %u\n", 3870 xhci->num_active_eps); 3871 goto disable_slot; 3872 } 3873 spin_unlock_irqrestore(&xhci->lock, flags); 3874 } 3875 /* Use GFP_NOIO, since this function can be called from 3876 * xhci_discover_or_reset_device(), which may be called as part of 3877 * mass storage driver error handling. 3878 */ 3879 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { 3880 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3881 goto disable_slot; 3882 } 3883 vdev = xhci->devs[slot_id]; 3884 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); 3885 trace_xhci_alloc_dev(slot_ctx); 3886 3887 udev->slot_id = slot_id; 3888 3889 xhci_debugfs_create_slot(xhci, slot_id); 3890 3891 #ifndef CONFIG_USB_DEFAULT_PERSIST 3892 /* 3893 * If resetting upon resume, we can't put the controller into runtime 3894 * suspend if there is a device attached. 3895 */ 3896 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3897 pm_runtime_get_noresume(hcd->self.controller); 3898 #endif 3899 3900 /* Is this a LS or FS device under a HS hub? */ 3901 /* Hub or peripherial? */ 3902 return 1; 3903 3904 disable_slot: 3905 ret = xhci_disable_slot(xhci, udev->slot_id); 3906 if (ret) 3907 xhci_free_virt_device(xhci, udev->slot_id); 3908 3909 return 0; 3910 } 3911 3912 /* 3913 * Issue an Address Device command and optionally send a corresponding 3914 * SetAddress request to the device. 3915 */ 3916 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, 3917 enum xhci_setup_dev setup) 3918 { 3919 const char *act = setup == SETUP_CONTEXT_ONLY ? 
"context" : "address"; 3920 unsigned long flags; 3921 struct xhci_virt_device *virt_dev; 3922 int ret = 0; 3923 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3924 struct xhci_slot_ctx *slot_ctx; 3925 struct xhci_input_control_ctx *ctrl_ctx; 3926 u64 temp_64; 3927 struct xhci_command *command = NULL; 3928 3929 mutex_lock(&xhci->mutex); 3930 3931 if (xhci->xhc_state) { /* dying, removing or halted */ 3932 ret = -ESHUTDOWN; 3933 goto out; 3934 } 3935 3936 if (!udev->slot_id) { 3937 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3938 "Bad Slot ID %d", udev->slot_id); 3939 ret = -EINVAL; 3940 goto out; 3941 } 3942 3943 virt_dev = xhci->devs[udev->slot_id]; 3944 3945 if (WARN_ON(!virt_dev)) { 3946 /* 3947 * In plug/unplug torture test with an NEC controller, 3948 * a zero-dereference was observed once due to virt_dev = 0. 3949 * Print useful debug rather than crash if it is observed again! 3950 */ 3951 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3952 udev->slot_id); 3953 ret = -EINVAL; 3954 goto out; 3955 } 3956 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3957 trace_xhci_setup_device_slot(slot_ctx); 3958 3959 if (setup == SETUP_CONTEXT_ONLY) { 3960 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3961 SLOT_STATE_DEFAULT) { 3962 xhci_dbg(xhci, "Slot already in default state\n"); 3963 goto out; 3964 } 3965 } 3966 3967 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 3968 if (!command) { 3969 ret = -ENOMEM; 3970 goto out; 3971 } 3972 3973 command->in_ctx = virt_dev->in_ctx; 3974 3975 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3976 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 3977 if (!ctrl_ctx) { 3978 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3979 __func__); 3980 ret = -EINVAL; 3981 goto out; 3982 } 3983 /* 3984 * If this is the first Set Address since device plug-in or 3985 * virt_device realloaction after a resume with an xHCI power loss, 3986 * then set up the slot context. 3987 */ 3988 if (!slot_ctx->dev_info) 3989 xhci_setup_addressable_virt_dev(xhci, udev); 3990 /* Otherwise, update the control endpoint ring enqueue pointer. */ 3991 else 3992 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3993 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 3994 ctrl_ctx->drop_flags = 0; 3995 3996 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 3997 le32_to_cpu(slot_ctx->dev_info) >> 27); 3998 3999 spin_lock_irqsave(&xhci->lock, flags); 4000 trace_xhci_setup_device(virt_dev); 4001 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, 4002 udev->slot_id, setup); 4003 if (ret) { 4004 spin_unlock_irqrestore(&xhci->lock, flags); 4005 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4006 "FIXME: allocate a command ring segment"); 4007 goto out; 4008 } 4009 xhci_ring_cmd_db(xhci); 4010 spin_unlock_irqrestore(&xhci->lock, flags); 4011 4012 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 4013 wait_for_completion(command->completion); 4014 4015 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 4016 * the SetAddress() "recovery interval" required by USB and aborting the 4017 * command on a timeout. 
4018 */ 4019 switch (command->status) { 4020 case COMP_COMMAND_ABORTED: 4021 case COMP_COMMAND_RING_STOPPED: 4022 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 4023 ret = -ETIME; 4024 break; 4025 case COMP_CONTEXT_STATE_ERROR: 4026 case COMP_SLOT_NOT_ENABLED_ERROR: 4027 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", 4028 act, udev->slot_id); 4029 ret = -EINVAL; 4030 break; 4031 case COMP_USB_TRANSACTION_ERROR: 4032 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); 4033 4034 mutex_unlock(&xhci->mutex); 4035 ret = xhci_disable_slot(xhci, udev->slot_id); 4036 if (!ret) 4037 xhci_alloc_dev(hcd, udev); 4038 kfree(command->completion); 4039 kfree(command); 4040 return -EPROTO; 4041 case COMP_INCOMPATIBLE_DEVICE_ERROR: 4042 dev_warn(&udev->dev, 4043 "ERROR: Incompatible device for setup %s command\n", act); 4044 ret = -ENODEV; 4045 break; 4046 case COMP_SUCCESS: 4047 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4048 "Successful setup %s command", act); 4049 break; 4050 default: 4051 xhci_err(xhci, 4052 "ERROR: unexpected setup %s command completion code 0x%x.\n", 4053 act, command->status); 4054 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); 4055 ret = -EINVAL; 4056 break; 4057 } 4058 if (ret) 4059 goto out; 4060 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 4061 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4062 "Op regs DCBAA ptr = %#016llx", temp_64); 4063 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4064 "Slot ID %d dcbaa entry @%p = %#016llx", 4065 udev->slot_id, 4066 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 4067 (unsigned long long) 4068 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 4069 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4070 "Output Context DMA address = %#08llx", 4071 (unsigned long long)virt_dev->out_ctx->dma); 4072 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 4073 le32_to_cpu(slot_ctx->dev_info) >> 27); 4074 /* 4075 * USB core uses address 1 for the roothubs, so we add one to the 4076 * address given back to us by the HC. 4077 */ 4078 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 4079 le32_to_cpu(slot_ctx->dev_info) >> 27); 4080 /* Zero the input context control for later use */ 4081 ctrl_ctx->add_flags = 0; 4082 ctrl_ctx->drop_flags = 0; 4083 4084 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4085 "Internal device address = %d", 4086 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); 4087 out: 4088 mutex_unlock(&xhci->mutex); 4089 if (command) { 4090 kfree(command->completion); 4091 kfree(command); 4092 } 4093 return ret; 4094 } 4095 4096 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 4097 { 4098 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); 4099 } 4100 4101 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) 4102 { 4103 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); 4104 } 4105 4106 /* 4107 * Transfer the port index into real index in the HW port status 4108 * registers. Caculate offset between the port's PORTSC register 4109 * and port status base. Divide the number of per port register 4110 * to get the real index. The raw port number bases 1. 4111 */ 4112 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) 4113 { 4114 struct xhci_hub *rhub; 4115 4116 rhub = xhci_get_rhub(hcd); 4117 return rhub->ports[port1 - 1]->hw_portnum + 1; 4118 } 4119 4120 /* 4121 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 4122 * slot context. 
4123 */
4124 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4125 struct usb_device *udev, u16 max_exit_latency)
4126 {
4127 struct xhci_virt_device *virt_dev;
4128 struct xhci_command *command;
4129 struct xhci_input_control_ctx *ctrl_ctx;
4130 struct xhci_slot_ctx *slot_ctx;
4131 unsigned long flags;
4132 int ret;
4133 
4134 spin_lock_irqsave(&xhci->lock, flags);
4135 
4136 virt_dev = xhci->devs[udev->slot_id];
4137 
4138 /*
4139 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4140 * xHC was re-initialized. Exit latency will be set later after
4141 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4142 */
4143 
4144 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4145 spin_unlock_irqrestore(&xhci->lock, flags);
4146 return 0;
4147 }
4148 
4149 /* Attempt to issue an Evaluate Context command to change the MEL. */
4150 command = xhci->lpm_command;
4151 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4152 if (!ctrl_ctx) {
4153 spin_unlock_irqrestore(&xhci->lock, flags);
4154 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4155 __func__);
4156 return -ENOMEM;
4157 }
4158 
4159 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4160 spin_unlock_irqrestore(&xhci->lock, flags);
4161 
4162 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4163 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4164 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4165 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4166 slot_ctx->dev_state = 0;
4167 
4168 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4169 "Set up evaluate context for LPM MEL change.");
4170 
4171 /* Issue and wait for the evaluate context command.
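 * Passing ctx_change=true below makes xhci_configure_endpoint() queue an
 * Evaluate Context command rather than a Configure Endpoint command.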
*/ 4172 ret = xhci_configure_endpoint(xhci, udev, command, 4173 true, true); 4174 4175 if (!ret) { 4176 spin_lock_irqsave(&xhci->lock, flags); 4177 virt_dev->current_mel = max_exit_latency; 4178 spin_unlock_irqrestore(&xhci->lock, flags); 4179 } 4180 return ret; 4181 } 4182 4183 #ifdef CONFIG_PM 4184 4185 /* BESL to HIRD Encoding array for USB2 LPM */ 4186 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4187 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 4188 4189 /* Calculate HIRD/BESL for USB2 PORTPMSC*/ 4190 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 4191 struct usb_device *udev) 4192 { 4193 int u2del, besl, besl_host; 4194 int besl_device = 0; 4195 u32 field; 4196 4197 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 4198 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4199 4200 if (field & USB_BESL_SUPPORT) { 4201 for (besl_host = 0; besl_host < 16; besl_host++) { 4202 if (xhci_besl_encoding[besl_host] >= u2del) 4203 break; 4204 } 4205 /* Use baseline BESL value as default */ 4206 if (field & USB_BESL_BASELINE_VALID) 4207 besl_device = USB_GET_BESL_BASELINE(field); 4208 else if (field & USB_BESL_DEEP_VALID) 4209 besl_device = USB_GET_BESL_DEEP(field); 4210 } else { 4211 if (u2del <= 50) 4212 besl_host = 0; 4213 else 4214 besl_host = (u2del - 51) / 75 + 1; 4215 } 4216 4217 besl = besl_host + besl_device; 4218 if (besl > 15) 4219 besl = 15; 4220 4221 return besl; 4222 } 4223 4224 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ 4225 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) 4226 { 4227 u32 field; 4228 int l1; 4229 int besld = 0; 4230 int hirdm = 0; 4231 4232 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4233 4234 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ 4235 l1 = udev->l1_params.timeout / 256; 4236 4237 /* device has preferred BESLD */ 4238 if (field & USB_BESL_DEEP_VALID) { 4239 besld = USB_GET_BESL_DEEP(field); 4240 hirdm = 1; 4241 } 4242 4243 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); 4244 } 4245 4246 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4247 struct usb_device *udev, int enable) 4248 { 4249 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4250 struct xhci_port **ports; 4251 __le32 __iomem *pm_addr, *hlpm_addr; 4252 u32 pm_val, hlpm_val, field; 4253 unsigned int port_num; 4254 unsigned long flags; 4255 int hird, exit_latency; 4256 int ret; 4257 4258 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || 4259 !udev->lpm_capable) 4260 return -EPERM; 4261 4262 if (!udev->parent || udev->parent->parent || 4263 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4264 return -EPERM; 4265 4266 if (udev->usb2_hw_lpm_capable != 1) 4267 return -EPERM; 4268 4269 spin_lock_irqsave(&xhci->lock, flags); 4270 4271 ports = xhci->usb2_rhub.ports; 4272 port_num = udev->portnum - 1; 4273 pm_addr = ports[port_num]->addr + PORTPMSC; 4274 pm_val = readl(pm_addr); 4275 hlpm_addr = ports[port_num]->addr + PORTHLPMC; 4276 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4277 4278 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 4279 enable ? "enable" : "disable", port_num + 1); 4280 4281 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { 4282 /* Host supports BESL timeout instead of HIRD */ 4283 if (udev->usb2_hw_lpm_besl_capable) { 4284 /* if device doesn't have a preferred BESL value use a 4285 * default one which works with mixed HIRD and BESL 4286 * systems. 
See XHCI_DEFAULT_BESL definition in xhci.h
4287 */
4288 if ((field & USB_BESL_SUPPORT) &&
4289 (field & USB_BESL_BASELINE_VALID))
4290 hird = USB_GET_BESL_BASELINE(field);
4291 else
4292 hird = udev->l1_params.besl;
4293 
4294 exit_latency = xhci_besl_encoding[hird];
4295 spin_unlock_irqrestore(&xhci->lock, flags);
4296 
4297 /* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx
4298 * input context for link power management evaluate
4299 * context commands. It is protected by the hcd->bandwidth
4300 * mutex and is shared by all devices. We need to set
4301 * the max exit latency in USB 2 BESL LPM as well, so
4302 * use the same mutex and xhci_change_max_exit_latency()
4303 */
4304 mutex_lock(hcd->bandwidth_mutex);
4305 ret = xhci_change_max_exit_latency(xhci, udev,
4306 exit_latency);
4307 mutex_unlock(hcd->bandwidth_mutex);
4308 
4309 if (ret < 0)
4310 return ret;
4311 spin_lock_irqsave(&xhci->lock, flags);
4312 
4313 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4314 writel(hlpm_val, hlpm_addr);
4315 /* flush write */
4316 readl(hlpm_addr);
4317 } else {
4318 hird = xhci_calculate_hird_besl(xhci, udev);
4319 }
4320 
4321 pm_val &= ~PORT_HIRD_MASK;
4322 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4323 writel(pm_val, pm_addr);
4324 pm_val = readl(pm_addr);
4325 pm_val |= PORT_HLE;
4326 writel(pm_val, pm_addr);
4327 /* flush write */
4328 readl(pm_addr);
4329 } else {
4330 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4331 writel(pm_val, pm_addr);
4332 /* flush write */
4333 readl(pm_addr);
4334 if (udev->usb2_hw_lpm_besl_capable) {
4335 spin_unlock_irqrestore(&xhci->lock, flags);
4336 mutex_lock(hcd->bandwidth_mutex);
4337 xhci_change_max_exit_latency(xhci, udev, 0);
4338 mutex_unlock(hcd->bandwidth_mutex);
4339 return 0;
4340 }
4341 }
4342 
4343 spin_unlock_irqrestore(&xhci->lock, flags);
4344 return 0;
4345 }
4346 
4347 /* Check if a USB2 port supports a given extended capability protocol.
4348 * Only USB2 ports' extended protocol capability values are cached.
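 * The port argument is 0-based; e.g. a capability covering raw ports 3-6
 * (offset 3, count 4) matches 0-based ports 2-5 after the -1 adjustment
 * below.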
* Return 1 if capability is supported
4350 */
4351 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4352 unsigned capability)
4353 {
4354 u32 port_offset, port_count;
4355 int i;
4356 
4357 for (i = 0; i < xhci->num_ext_caps; i++) {
4358 if (xhci->ext_caps[i] & capability) {
4359 /* port offsets start at 1 */
4360 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4361 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4362 if (port >= port_offset &&
4363 port < port_offset + port_count)
4364 return 1;
4365 }
4366 }
4367 return 0;
4368 }
4369 
4370 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4371 {
4372 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4373 int portnum = udev->portnum - 1;
4374 
4375 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4376 !udev->lpm_capable)
4377 return 0;
4378 
4379 /* So far, we only support LPM for non-hub devices connected to the root hub */
4380 if (!udev->parent || udev->parent->parent ||
4381 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4382 return 0;
4383 
4384 if (xhci->hw_lpm_support == 1 &&
4385 xhci_check_usb2_port_capability(
4386 xhci, portnum, XHCI_HLC)) {
4387 udev->usb2_hw_lpm_capable = 1;
4388 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4389 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4390 if (xhci_check_usb2_port_capability(xhci, portnum,
4391 XHCI_BLC))
4392 udev->usb2_hw_lpm_besl_capable = 1;
4393 }
4394 
4395 return 0;
4396 }
4397 
4398 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4399 
4400 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4401 static unsigned long long xhci_service_interval_to_ns(
4402 struct usb_endpoint_descriptor *desc)
4403 {
4404 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4405 }
4406 
4407 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4408 enum usb3_link_state state)
4409 {
4410 unsigned long long sel;
4411 unsigned long long pel;
4412 unsigned int max_sel_pel;
4413 char *state_name;
4414 
4415 switch (state) {
4416 case USB3_LPM_U1:
4417 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4418 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4419 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4420 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4421 state_name = "U1";
4422 break;
4423 case USB3_LPM_U2:
4424 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4425 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4426 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4427 state_name = "U2";
4428 break;
4429 default:
4430 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4431 __func__);
4432 return USB3_LPM_DISABLED;
4433 }
4434 
4435 if (sel <= max_sel_pel && pel <= max_sel_pel)
4436 return USB3_LPM_DEVICE_INITIATED;
4437 
4438 if (sel > max_sel_pel)
4439 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4440 "due to long SEL %llu us\n",
4441 state_name, sel);
4442 else
4443 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4444 "due to long PEL %llu us\n",
4445 state_name, pel);
4446 return USB3_LPM_DISABLED;
4447 }
4448 
4449 /* The U1 timeout should be the maximum of the following values:
4450 * - For control endpoints, U1 system exit latency (SEL) * 3
4451 * - For bulk endpoints, U1 SEL * 5
4452 * - For interrupt endpoints:
4453 * - Notification EPs, U1 SEL * 3
4454 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4455 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4456 */
4457 static unsigned long long xhci_calculate_intel_u1_timeout(
4458 struct usb_device *udev,
4459 struct usb_endpoint_descriptor *desc)
4460 {
4461 unsigned long long timeout_ns;
4462 int ep_type;
4463 int intr_type;
4464 
4465 ep_type = usb_endpoint_type(desc);
4466 switch (ep_type) {
4467 case USB_ENDPOINT_XFER_CONTROL:
4468 timeout_ns = udev->u1_params.sel * 3;
4469 break;
4470 case USB_ENDPOINT_XFER_BULK:
4471 timeout_ns = udev->u1_params.sel * 5;
4472 break;
4473 case USB_ENDPOINT_XFER_INT:
4474 intr_type = usb_endpoint_interrupt_type(desc);
4475 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4476 timeout_ns = udev->u1_params.sel * 3;
4477 break;
4478 }
4479 /* Otherwise the calculation is the same as isoc eps */
4480 /* fall through */
4481 case USB_ENDPOINT_XFER_ISOC:
4482 timeout_ns = xhci_service_interval_to_ns(desc);
4483 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4484 if (timeout_ns < udev->u1_params.sel * 2)
4485 timeout_ns = udev->u1_params.sel * 2;
4486 break;
4487 default:
4488 return 0;
4489 }
4490 
4491 return timeout_ns;
4492 }
4493 
4494 /* Returns the hub-encoded U1 timeout value. */
4495 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4496 struct usb_device *udev,
4497 struct usb_endpoint_descriptor *desc)
4498 {
4499 unsigned long long timeout_ns;
4500 
4501 if (xhci->quirks & XHCI_INTEL_HOST)
4502 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4503 else
4504 timeout_ns = udev->u1_params.sel;
4505 
4506 /* The U1 timeout is encoded in 1us intervals.
4507 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4508 */
4509 if (timeout_ns == USB3_LPM_DISABLED)
4510 timeout_ns = 1;
4511 else
4512 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4513 
4514 /* If the necessary timeout value is bigger than what we can set in the
4515 * USB 3.0 hub, we have to disable hub-initiated U1.
4516 */
4517 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4518 return timeout_ns;
4519 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4520 "due to long timeout %llu us\n", timeout_ns);
4521 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4522 }
4523 
4524 /* The U2 timeout should be the maximum of:
4525 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4526 * - largest bInterval of any active periodic endpoint (to avoid going
4527 * into lower power link states between intervals).
4528 * - the U2 Exit Latency of the device
4529 */
4530 static unsigned long long xhci_calculate_intel_u2_timeout(
4531 struct usb_device *udev,
4532 struct usb_endpoint_descriptor *desc)
4533 {
4534 unsigned long long timeout_ns;
4535 unsigned long long u2_del_ns;
4536 
4537 timeout_ns = 10 * 1000 * 1000;
4538 
4539 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4540 (xhci_service_interval_to_ns(desc) > timeout_ns))
4541 timeout_ns = xhci_service_interval_to_ns(desc);
4542 
4543 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4544 if (u2_del_ns > timeout_ns)
4545 timeout_ns = u2_del_ns;
4546 
4547 return timeout_ns;
4548 }
4549 
4550 /* Returns the hub-encoded U2 timeout value.
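 * The encoding is in 256 us units; e.g. a 10 ms timeout encodes as
 * DIV_ROUND_UP(10000000 ns, 256000) = 40.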
*/ 4551 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, 4552 struct usb_device *udev, 4553 struct usb_endpoint_descriptor *desc) 4554 { 4555 unsigned long long timeout_ns; 4556 4557 if (xhci->quirks & XHCI_INTEL_HOST) 4558 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); 4559 else 4560 timeout_ns = udev->u2_params.sel; 4561 4562 /* The U2 timeout is encoded in 256us intervals */ 4563 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4564 /* If the necessary timeout value is bigger than what we can set in the 4565 * USB 3.0 hub, we have to disable hub-initiated U2. 4566 */ 4567 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4568 return timeout_ns; 4569 dev_dbg(&udev->dev, "Hub-initiated U2 disabled " 4570 "due to long timeout %llu ms\n", timeout_ns); 4571 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4572 } 4573 4574 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4575 struct usb_device *udev, 4576 struct usb_endpoint_descriptor *desc, 4577 enum usb3_link_state state, 4578 u16 *timeout) 4579 { 4580 if (state == USB3_LPM_U1) 4581 return xhci_calculate_u1_timeout(xhci, udev, desc); 4582 else if (state == USB3_LPM_U2) 4583 return xhci_calculate_u2_timeout(xhci, udev, desc); 4584 4585 return USB3_LPM_DISABLED; 4586 } 4587 4588 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4589 struct usb_device *udev, 4590 struct usb_endpoint_descriptor *desc, 4591 enum usb3_link_state state, 4592 u16 *timeout) 4593 { 4594 u16 alt_timeout; 4595 4596 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 4597 desc, state, timeout); 4598 4599 /* If we found we can't enable hub-initiated LPM, or 4600 * the U1 or U2 exit latency was too high to allow 4601 * device-initiated LPM as well, just stop searching. 4602 */ 4603 if (alt_timeout == USB3_LPM_DISABLED || 4604 alt_timeout == USB3_LPM_DEVICE_INITIATED) { 4605 *timeout = alt_timeout; 4606 return -E2BIG; 4607 } 4608 if (alt_timeout > *timeout) 4609 *timeout = alt_timeout; 4610 return 0; 4611 } 4612 4613 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 4614 struct usb_device *udev, 4615 struct usb_host_interface *alt, 4616 enum usb3_link_state state, 4617 u16 *timeout) 4618 { 4619 int j; 4620 4621 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 4622 if (xhci_update_timeout_for_endpoint(xhci, udev, 4623 &alt->endpoint[j].desc, state, timeout)) 4624 return -E2BIG; 4625 continue; 4626 } 4627 return 0; 4628 } 4629 4630 static int xhci_check_intel_tier_policy(struct usb_device *udev, 4631 enum usb3_link_state state) 4632 { 4633 struct usb_device *parent; 4634 unsigned int num_hubs; 4635 4636 if (state == USB3_LPM_U2) 4637 return 0; 4638 4639 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ 4640 for (parent = udev->parent, num_hubs = 0; parent->parent; 4641 parent = parent->parent) 4642 num_hubs++; 4643 4644 if (num_hubs < 2) 4645 return 0; 4646 4647 dev_dbg(&udev->dev, "Disabling U1 link state for device" 4648 " below second-tier hub.\n"); 4649 dev_dbg(&udev->dev, "Plug device into first-tier hub " 4650 "to decrease power consumption.\n"); 4651 return -E2BIG; 4652 } 4653 4654 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 4655 struct usb_device *udev, 4656 enum usb3_link_state state) 4657 { 4658 if (xhci->quirks & XHCI_INTEL_HOST) 4659 return xhci_check_intel_tier_policy(udev, state); 4660 else 4661 return 0; 4662 } 4663 4664 /* Returns the U1 or U2 timeout that should be enabled. 
4665 * If the tier check or timeout setting functions return with a non-zero exit 4666 * code, that means the timeout value has been finalized and we shouldn't look 4667 * at any more endpoints. 4668 */ 4669 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 4670 struct usb_device *udev, enum usb3_link_state state) 4671 { 4672 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4673 struct usb_host_config *config; 4674 char *state_name; 4675 int i; 4676 u16 timeout = USB3_LPM_DISABLED; 4677 4678 if (state == USB3_LPM_U1) 4679 state_name = "U1"; 4680 else if (state == USB3_LPM_U2) 4681 state_name = "U2"; 4682 else { 4683 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 4684 state); 4685 return timeout; 4686 } 4687 4688 if (xhci_check_tier_policy(xhci, udev, state) < 0) 4689 return timeout; 4690 4691 /* Gather some information about the currently installed configuration 4692 * and alternate interface settings. 4693 */ 4694 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 4695 state, &timeout)) 4696 return timeout; 4697 4698 config = udev->actconfig; 4699 if (!config) 4700 return timeout; 4701 4702 for (i = 0; i < config->desc.bNumInterfaces; i++) { 4703 struct usb_driver *driver; 4704 struct usb_interface *intf = config->interface[i]; 4705 4706 if (!intf) 4707 continue; 4708 4709 /* Check if any currently bound drivers want hub-initiated LPM 4710 * disabled. 4711 */ 4712 if (intf->dev.driver) { 4713 driver = to_usb_driver(intf->dev.driver); 4714 if (driver && driver->disable_hub_initiated_lpm) { 4715 dev_dbg(&udev->dev, "Hub-initiated %s disabled " 4716 "at request of driver %s\n", 4717 state_name, driver->name); 4718 return xhci_get_timeout_no_hub_lpm(udev, state); 4719 } 4720 } 4721 4722 /* Not sure how this could happen... */ 4723 if (!intf->cur_altsetting) 4724 continue; 4725 4726 if (xhci_update_timeout_for_interface(xhci, udev, 4727 intf->cur_altsetting, 4728 state, &timeout)) 4729 return timeout; 4730 } 4731 return timeout; 4732 } 4733 4734 static int calculate_max_exit_latency(struct usb_device *udev, 4735 enum usb3_link_state state_changed, 4736 u16 hub_encoded_timeout) 4737 { 4738 unsigned long long u1_mel_us = 0; 4739 unsigned long long u2_mel_us = 0; 4740 unsigned long long mel_us = 0; 4741 bool disabling_u1; 4742 bool disabling_u2; 4743 bool enabling_u1; 4744 bool enabling_u2; 4745 4746 disabling_u1 = (state_changed == USB3_LPM_U1 && 4747 hub_encoded_timeout == USB3_LPM_DISABLED); 4748 disabling_u2 = (state_changed == USB3_LPM_U2 && 4749 hub_encoded_timeout == USB3_LPM_DISABLED); 4750 4751 enabling_u1 = (state_changed == USB3_LPM_U1 && 4752 hub_encoded_timeout != USB3_LPM_DISABLED); 4753 enabling_u2 = (state_changed == USB3_LPM_U2 && 4754 hub_encoded_timeout != USB3_LPM_DISABLED); 4755 4756 /* If U1 was already enabled and we're not disabling it, 4757 * or we're going to enable U1, account for the U1 max exit latency. 4758 */ 4759 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 4760 enabling_u1) 4761 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 4762 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 4763 enabling_u2) 4764 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 4765 4766 if (u1_mel_us > u2_mel_us) 4767 mel_us = u1_mel_us; 4768 else 4769 mel_us = u2_mel_us; 4770 /* xHCI host controller max exit latency field is only 16 bits wide. 
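 * (MAX_EXIT is 0xffff, so with mel_us counted in microseconds anything
 * above roughly 65.5 ms is rejected as too big.)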
*/ 4771 if (mel_us > MAX_EXIT) { 4772 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 4773 "is too big.\n", mel_us); 4774 return -E2BIG; 4775 } 4776 return mel_us; 4777 } 4778 4779 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ 4780 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4781 struct usb_device *udev, enum usb3_link_state state) 4782 { 4783 struct xhci_hcd *xhci; 4784 u16 hub_encoded_timeout; 4785 int mel; 4786 int ret; 4787 4788 xhci = hcd_to_xhci(hcd); 4789 /* The LPM timeout values are pretty host-controller specific, so don't 4790 * enable hub-initiated timeouts unless the vendor has provided 4791 * information about their timeout algorithm. 4792 */ 4793 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4794 !xhci->devs[udev->slot_id]) 4795 return USB3_LPM_DISABLED; 4796 4797 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 4798 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 4799 if (mel < 0) { 4800 /* Max Exit Latency is too big, disable LPM. */ 4801 hub_encoded_timeout = USB3_LPM_DISABLED; 4802 mel = 0; 4803 } 4804 4805 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4806 if (ret) 4807 return ret; 4808 return hub_encoded_timeout; 4809 } 4810 4811 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4812 struct usb_device *udev, enum usb3_link_state state) 4813 { 4814 struct xhci_hcd *xhci; 4815 u16 mel; 4816 4817 xhci = hcd_to_xhci(hcd); 4818 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4819 !xhci->devs[udev->slot_id]) 4820 return 0; 4821 4822 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 4823 return xhci_change_max_exit_latency(xhci, udev, mel); 4824 } 4825 #else /* CONFIG_PM */ 4826 4827 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4828 struct usb_device *udev, int enable) 4829 { 4830 return 0; 4831 } 4832 4833 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4834 { 4835 return 0; 4836 } 4837 4838 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4839 struct usb_device *udev, enum usb3_link_state state) 4840 { 4841 return USB3_LPM_DISABLED; 4842 } 4843 4844 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4845 struct usb_device *udev, enum usb3_link_state state) 4846 { 4847 return 0; 4848 } 4849 #endif /* CONFIG_PM */ 4850 4851 /*-------------------------------------------------------------------------*/ 4852 4853 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 4854 * internal data structures for the device. 
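 * For a hub this means setting the hub flag, MTT, number of ports and TT
 * think time in the slot context, then applying them with a Configure
 * Endpoint or Evaluate Context command, depending on hci_version.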
4855 */ 4856 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 4857 struct usb_tt *tt, gfp_t mem_flags) 4858 { 4859 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4860 struct xhci_virt_device *vdev; 4861 struct xhci_command *config_cmd; 4862 struct xhci_input_control_ctx *ctrl_ctx; 4863 struct xhci_slot_ctx *slot_ctx; 4864 unsigned long flags; 4865 unsigned think_time; 4866 int ret; 4867 4868 /* Ignore root hubs */ 4869 if (!hdev->parent) 4870 return 0; 4871 4872 vdev = xhci->devs[hdev->slot_id]; 4873 if (!vdev) { 4874 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 4875 return -EINVAL; 4876 } 4877 4878 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); 4879 if (!config_cmd) 4880 return -ENOMEM; 4881 4882 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 4883 if (!ctrl_ctx) { 4884 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4885 __func__); 4886 xhci_free_command(xhci, config_cmd); 4887 return -ENOMEM; 4888 } 4889 4890 spin_lock_irqsave(&xhci->lock, flags); 4891 if (hdev->speed == USB_SPEED_HIGH && 4892 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 4893 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 4894 xhci_free_command(xhci, config_cmd); 4895 spin_unlock_irqrestore(&xhci->lock, flags); 4896 return -ENOMEM; 4897 } 4898 4899 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 4900 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4901 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 4902 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 4903 /* 4904 * refer to section 6.2.2: MTT should be 0 for full speed hub, 4905 * but it may be already set to 1 when setup an xHCI virtual 4906 * device, so clear it anyway. 4907 */ 4908 if (tt->multi) 4909 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 4910 else if (hdev->speed == USB_SPEED_FULL) 4911 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); 4912 4913 if (xhci->hci_version > 0x95) { 4914 xhci_dbg(xhci, "xHCI version %x needs hub " 4915 "TT think time and number of ports\n", 4916 (unsigned int) xhci->hci_version); 4917 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 4918 /* Set TT think time - convert from ns to FS bit times. 4919 * 0 = 8 FS bit times, 1 = 16 FS bit times, 4920 * 2 = 24 FS bit times, 3 = 32 FS bit times. 4921 * 4922 * xHCI 1.0: this field shall be 0 if the device is not a 4923 * High-spped hub. 4924 */ 4925 think_time = tt->think_time; 4926 if (think_time != 0) 4927 think_time = (think_time / 666) - 1; 4928 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) 4929 slot_ctx->tt_info |= 4930 cpu_to_le32(TT_THINK_TIME(think_time)); 4931 } else { 4932 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 4933 "TT think time or number of ports\n", 4934 (unsigned int) xhci->hci_version); 4935 } 4936 slot_ctx->dev_state = 0; 4937 spin_unlock_irqrestore(&xhci->lock, flags); 4938 4939 xhci_dbg(xhci, "Set up %s for hub device.\n", 4940 (xhci->hci_version > 0x95) ? 4941 "configure endpoint" : "evaluate context"); 4942 4943 /* Issue and wait for the configure endpoint or 4944 * evaluate context command. 
4945 */ 4946 if (xhci->hci_version > 0x95) 4947 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4948 false, false); 4949 else 4950 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4951 true, false); 4952 4953 xhci_free_command(xhci, config_cmd); 4954 return ret; 4955 } 4956 4957 static int xhci_get_frame(struct usb_hcd *hcd) 4958 { 4959 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4960 /* EHCI mods by the periodic size. Why? */ 4961 return readl(&xhci->run_regs->microframe_index) >> 3; 4962 } 4963 4964 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) 4965 { 4966 struct xhci_hcd *xhci; 4967 /* 4968 * TODO: Check with DWC3 clients for sysdev according to 4969 * quirks 4970 */ 4971 struct device *dev = hcd->self.sysdev; 4972 unsigned int minor_rev; 4973 int retval; 4974 4975 /* Accept arbitrarily long scatter-gather lists */ 4976 hcd->self.sg_tablesize = ~0; 4977 4978 /* support to build packet from discontinuous buffers */ 4979 hcd->self.no_sg_constraint = 1; 4980 4981 /* XHCI controllers don't stop the ep queue on short packets :| */ 4982 hcd->self.no_stop_on_short = 1; 4983 4984 xhci = hcd_to_xhci(hcd); 4985 4986 if (usb_hcd_is_primary_hcd(hcd)) { 4987 xhci->main_hcd = hcd; 4988 xhci->usb2_rhub.hcd = hcd; 4989 /* Mark the first roothub as being USB 2.0. 4990 * The xHCI driver will register the USB 3.0 roothub. 4991 */ 4992 hcd->speed = HCD_USB2; 4993 hcd->self.root_hub->speed = USB_SPEED_HIGH; 4994 /* 4995 * USB 2.0 roothub under xHCI has an integrated TT, 4996 * (rate matching hub) as opposed to having an OHCI/UHCI 4997 * companion controller. 4998 */ 4999 hcd->has_tt = 1; 5000 } else { 5001 /* 5002 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol 5003 * minor revision instead of sbrn 5004 */ 5005 minor_rev = xhci->usb3_rhub.min_rev; 5006 if (minor_rev) { 5007 hcd->speed = HCD_USB31; 5008 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; 5009 } 5010 xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", 5011 minor_rev, 5012 minor_rev ? "Enhanced" : ""); 5013 5014 xhci->usb3_rhub.hcd = hcd; 5015 /* xHCI private pointer was set in xhci_pci_probe for the second 5016 * registered roothub. 5017 */ 5018 return 0; 5019 } 5020 5021 mutex_init(&xhci->mutex); 5022 xhci->cap_regs = hcd->regs; 5023 xhci->op_regs = hcd->regs + 5024 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); 5025 xhci->run_regs = hcd->regs + 5026 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 5027 /* Cache read-only capability registers */ 5028 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); 5029 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); 5030 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); 5031 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); 5032 xhci->hci_version = HC_VERSION(xhci->hcc_params); 5033 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); 5034 if (xhci->hci_version > 0x100) 5035 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); 5036 5037 xhci->quirks |= quirks; 5038 5039 get_quirks(dev, xhci); 5040 5041 /* In xhci controllers which follow xhci 1.0 spec gives a spurious 5042 * success event after a short transfer. This quirk will ignore such 5043 * spurious event. 5044 */ 5045 if (xhci->hci_version > 0x96) 5046 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 5047 5048 /* Make sure the HC is halted. */ 5049 retval = xhci_halt(xhci); 5050 if (retval) 5051 return retval; 5052 5053 xhci_zero_64b_regs(xhci); 5054 5055 xhci_dbg(xhci, "Resetting HCD\n"); 5056 /* Reset the internal HC memory state and registers. 
*/ 5057 retval = xhci_reset(xhci); 5058 if (retval) 5059 return retval; 5060 xhci_dbg(xhci, "Reset complete\n"); 5061 5062 /* 5063 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) 5064 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit 5065 * address memory pointers actually. So, this driver clears the AC64 5066 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, 5067 * DMA_BIT_MASK(32)) in this xhci_gen_setup(). 5068 */ 5069 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) 5070 xhci->hcc_params &= ~BIT(0); 5071 5072 /* Set dma_mask and coherent_dma_mask to 64-bits, 5073 * if xHC supports 64-bit addressing */ 5074 if (HCC_64BIT_ADDR(xhci->hcc_params) && 5075 !dma_set_mask(dev, DMA_BIT_MASK(64))) { 5076 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 5077 dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); 5078 } else { 5079 /* 5080 * This is to avoid error in cases where a 32-bit USB 5081 * controller is used on a 64-bit capable system. 5082 */ 5083 retval = dma_set_mask(dev, DMA_BIT_MASK(32)); 5084 if (retval) 5085 return retval; 5086 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); 5087 dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 5088 } 5089 5090 xhci_dbg(xhci, "Calling HCD init\n"); 5091 /* Initialize HCD and host controller data structures. */ 5092 retval = xhci_init(hcd); 5093 if (retval) 5094 return retval; 5095 xhci_dbg(xhci, "Called HCD init\n"); 5096 5097 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", 5098 xhci->hcc_params, xhci->hci_version, xhci->quirks); 5099 5100 return 0; 5101 } 5102 EXPORT_SYMBOL_GPL(xhci_gen_setup); 5103 5104 static const struct hc_driver xhci_hc_driver = { 5105 .description = "xhci-hcd", 5106 .product_desc = "xHCI Host Controller", 5107 .hcd_priv_size = sizeof(struct xhci_hcd), 5108 5109 /* 5110 * generic hardware linkage 5111 */ 5112 .irq = xhci_irq, 5113 .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED, 5114 5115 /* 5116 * basic lifecycle operations 5117 */ 5118 .reset = NULL, /* set in xhci_init_driver() */ 5119 .start = xhci_run, 5120 .stop = xhci_stop, 5121 .shutdown = xhci_shutdown, 5122 5123 /* 5124 * managing i/o requests and associated device resources 5125 */ 5126 .urb_enqueue = xhci_urb_enqueue, 5127 .urb_dequeue = xhci_urb_dequeue, 5128 .alloc_dev = xhci_alloc_dev, 5129 .free_dev = xhci_free_dev, 5130 .alloc_streams = xhci_alloc_streams, 5131 .free_streams = xhci_free_streams, 5132 .add_endpoint = xhci_add_endpoint, 5133 .drop_endpoint = xhci_drop_endpoint, 5134 .endpoint_reset = xhci_endpoint_reset, 5135 .check_bandwidth = xhci_check_bandwidth, 5136 .reset_bandwidth = xhci_reset_bandwidth, 5137 .address_device = xhci_address_device, 5138 .enable_device = xhci_enable_device, 5139 .update_hub_device = xhci_update_hub_device, 5140 .reset_device = xhci_discover_or_reset_device, 5141 5142 /* 5143 * scheduling support 5144 */ 5145 .get_frame_number = xhci_get_frame, 5146 5147 /* 5148 * root hub support 5149 */ 5150 .hub_control = xhci_hub_control, 5151 .hub_status_data = xhci_hub_status_data, 5152 .bus_suspend = xhci_bus_suspend, 5153 .bus_resume = xhci_bus_resume, 5154 .get_resuming_ports = xhci_get_resuming_ports, 5155 5156 /* 5157 * call back when device connected and addressed 5158 */ 5159 .update_device = xhci_update_device, 5160 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, 5161 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, 5162 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, 5163 .find_raw_port_number = xhci_find_raw_port_number, 5164 }; 5165 5166 void 
xhci_init_driver(struct hc_driver *drv, 5167 const struct xhci_driver_overrides *over) 5168 { 5169 BUG_ON(!over); 5170 5171 /* Copy the generic table to drv then apply the overrides */ 5172 *drv = xhci_hc_driver; 5173 5174 if (over) { 5175 drv->hcd_priv_size += over->extra_priv_size; 5176 if (over->reset) 5177 drv->reset = over->reset; 5178 if (over->start) 5179 drv->start = over->start; 5180 } 5181 } 5182 EXPORT_SYMBOL_GPL(xhci_init_driver); 5183 5184 MODULE_DESCRIPTION(DRIVER_DESC); 5185 MODULE_AUTHOR(DRIVER_AUTHOR); 5186 MODULE_LICENSE("GPL"); 5187 5188 static int __init xhci_hcd_init(void) 5189 { 5190 /* 5191 * Check the compiler generated sizes of structures that must be laid 5192 * out in specific ways for hardware access. 5193 */ 5194 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 5195 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); 5196 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); 5197 /* xhci_device_control has eight fields, and also 5198 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 5199 */ 5200 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 5201 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 5202 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 5203 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); 5204 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); 5205 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ 5206 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 5207 5208 if (usb_disabled()) 5209 return -ENODEV; 5210 5211 xhci_debugfs_create_root(); 5212 5213 return 0; 5214 } 5215 5216 /* 5217 * If an init function is provided, an exit function must also be provided 5218 * to allow module unload. 5219 */ 5220 static void __exit xhci_hcd_fini(void) 5221 { 5222 xhci_debugfs_remove_root(); 5223 } 5224 5225 module_init(xhci_hcd_init); 5226 module_exit(xhci_hcd_fini); 5227