// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" microseconds
 * have passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}
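
/*
 * Typical use, for illustration: xhci_halt() below waits for the HC Halted
 * status bit with
 *
 *	xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *		       XHCI_MAX_HALT_USEC);
 *
 * The all-ones check matters on hot-removable hosts (e.g. behind
 * Thunderbolt), where MMIO reads from a vanished device return 0xffffffff.
 */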

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
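	/*
	 * Interrupt enables are masked unconditionally below; the Run/Stop
	 * bit is cleared only if the HC is still running, which begins the
	 * actual halt sequence that xhci_halt() then waits on.
	 */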
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* Clear state flags, including dying, halted and removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * The driver must not write to any doorbells or operational registers
	 * other than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

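	/*
	 * Reset returns ports and operational registers to their defaults,
	 * so any suspend/resume bookkeeping carried over from before the
	 * reset is stale and is dropped here.
	 */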
	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}
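
/*
 * Note: per the xHCI spec, the IMAN.IP (interrupt pending) bit is
 * write-1-to-clear, so the ER_IRQ_ENABLE()/ER_IRQ_DISABLE() helpers used
 * above mask IP to zero before flipping the enable bit; writing the raw
 * value back could silently acknowledge a pending interrupt.
 */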

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host controller
 * port every 2 seconds and recovers a port by issuing a Warm reset if
 * Compliance mode is detected; otherwise the port becomes "dead" (no device
 * connections or disconnections will be detected anymore). Because no status
 * event is generated when entering compliance mode (per xhci spec), this
 * quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
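
/*
 * Worked example: with 4 USB3 root hub ports, port_status_u0 must reach
 * 0b1111 (one bit per port seen in U0) before compliance_mode_recovery()
 * stops re-arming the recovery timer.
 */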

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before host is running with a lock
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupter;
	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (unsigned long) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set the interrupt modulation register");
	temp = readl(&ir->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &ir->ir_set->irq_control);
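	/*
	 * imod_interval is in nanoseconds while the IMOD interval field
	 * counts 250 ns ticks, hence the divide by 250 above. With the usual
	 * 40000 ns default this programs 160 ticks, i.e. at most one
	 * interrupt every 40 us.
	 */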

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupter;

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	if (!ir)
		return;

	ir->s3_erst_size = readl(&ir->ir_set->erst_size);
	ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
	ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
	ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(ir->s3_erst_size, &ir->ir_set->erst_size);
	xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
	xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
	writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
	writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%llx",
		       (unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer across suspend, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
		       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);
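	/*
	 * The last TRB of each segment is deliberately left in place: it is
	 * the link TRB carrying the pointer to the next segment, so only its
	 * cycle bit is cleared above rather than zeroing the whole TRB.
	 */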

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to U0).
 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
 * done at enumeration clears this wake, so force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
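
	/*
	 * USB3 ports additionally report Cold Attach Status (PORT_CAS),
	 * which has no USB2 equivalent, so only this loop checks for it.
	 */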
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so when the driver polls for the
		 * xHC to clear BIT(8) it never happens, and the driver
		 * assumes that the controller is not responding and
		 * times out. To work around this, it's good to check
		 * that the SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
	bool hibernated = (msg.event == PM_EVENT_RESTORE);
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;
	bool reinit_xhc = false;

	if (!hcd->state)
		return 0;

	/*
	 * Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		reinit_xhc = true;

	if (!reinit_xhc) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the
		 * controller restore, so set the timeout to 100 ms; the
		 * xHCI specification doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		reinit_xhc = true;
		if (!xhci->broken_suspend)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
	}

	if (reinit_xhc) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci->interrupter);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/*
		 * USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}

		hcd->state = HC_STATE_SUSPENDED;
		if (xhci->shared_hcd)
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

done:
	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed, give it that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent &&
		    msg.event == PM_EVENT_AUTO_RESUME) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume: the ports can
	 * suffer the Compliance Mode issue again, regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
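
/*
 * Worked examples of the index formula above: endpoint 2 IN
 * (bEndpointAddress 0x82) gives index = 2 * 2 + 1 - 1 = 4, endpoint 2 OUT
 * (0x02) gives index = 2 * 2 + 0 - 1 = 3, and the default control endpoint
 * (ep0) gives index 0.
 */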

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
			   struct usb_host_endpoint *ep, int check_ep,
			   bool check_virt_dev, const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
				 func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and virt_dev that do not match\n",
				 func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in usb_device = %d",
			       max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in xHCI HW = %d",
			       hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, mem_flags);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				  __func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				   xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK); /* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
					      true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}
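
/*
 * Note: only full-speed devices may legally report an ep0 wMaxPacketSize of
 * 8, 16, 32 or 64 bytes (high speed is fixed at 64, low speed at 8), which
 * is why xhci_urb_enqueue() below applies this fixup only to USB_SPEED_FULL
 * devices.
 */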

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	if (!urb)
		return -EINVAL;
	ret = xhci_check_args(hcd, urb->dev, urb->ep,
			      true, true, __func__);
	if (ret <= 0)
		return ret ? ret : -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd))
		return -ESHUTDOWN;

	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		return -ENODEV;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
		 urb->transfer_buffer_length > 0 &&
		 urb->transfer_flags & URB_ZERO_PACKET &&
		 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;
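	/*
	 * The num_tds == 2 case above covers a bulk OUT URB with
	 * URB_ZERO_PACKET set whose length is an exact multiple of
	 * wMaxPacketSize: a second, zero-length TD carries the terminating
	 * zero-length packet.
	 */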

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
						   ep_index, urb, mem_flags);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
						 slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are three cases:
 *
 * 1) If the HC is in the middle of processing the URB to be canceled, we
 *    simply move the ring's dequeue pointer past those TRBs using the Set
 *    Transfer Ring Dequeue Pointer command. This will be the common case,
 *    when drivers timeout on the last submitted URB and attempt to cancel.
 *
 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *    series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
 *    endpoint command, as noted in the xHCI 0.95 errata.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
xhci_urb_dequeue(struct usb_hcd * hcd,struct urb * urb,int status)1636 static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1637 {
1638 unsigned long flags;
1639 int ret, i;
1640 u32 temp;
1641 struct xhci_hcd *xhci;
1642 struct urb_priv *urb_priv;
1643 struct xhci_td *td;
1644 unsigned int ep_index;
1645 struct xhci_ring *ep_ring;
1646 struct xhci_virt_ep *ep;
1647 struct xhci_command *command;
1648 struct xhci_virt_device *vdev;
1649
1650 xhci = hcd_to_xhci(hcd);
1651 spin_lock_irqsave(&xhci->lock, flags);
1652
1653 trace_xhci_urb_dequeue(urb);
1654
1655 /* Make sure the URB hasn't completed or been unlinked already */
1656 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1657 if (ret)
1658 goto done;
1659
1660 /* give back URB now if we can't queue it for cancel */
1661 vdev = xhci->devs[urb->dev->slot_id];
1662 urb_priv = urb->hcpriv;
1663 if (!vdev || !urb_priv)
1664 goto err_giveback;
1665
1666 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1667 ep = &vdev->eps[ep_index];
1668 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1669 if (!ep || !ep_ring)
1670 goto err_giveback;
1671
1672 /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
1673 temp = readl(&xhci->op_regs->status);
1674 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1675 xhci_hc_died(xhci);
1676 goto done;
1677 }
1678
1679 /*
1680 * check ring is not re-allocated since URB was enqueued. If it is, then
1681 * make sure none of the ring related pointers in this URB private data
1682 * are touched, such as td_list, otherwise we overwrite freed data
1683 */
1684 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1685 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1686 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1687 td = &urb_priv->td[i];
1688 if (!list_empty(&td->cancelled_td_list))
1689 list_del_init(&td->cancelled_td_list);
1690 }
1691 goto err_giveback;
1692 }
1693
1694 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1695 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1696 "HC halted, freeing TD manually.");
1697 for (i = urb_priv->num_tds_done;
1698 i < urb_priv->num_tds;
1699 i++) {
1700 td = &urb_priv->td[i];
1701 if (!list_empty(&td->td_list))
1702 list_del_init(&td->td_list);
1703 if (!list_empty(&td->cancelled_td_list))
1704 list_del_init(&td->cancelled_td_list);
1705 }
1706 goto err_giveback;
1707 }
1708
1709 i = urb_priv->num_tds_done;
1710 if (i < urb_priv->num_tds)
1711 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1712 "Cancel URB %p, dev %s, ep 0x%x, "
1713 "starting at offset 0x%llx",
1714 urb, urb->dev->devpath,
1715 urb->ep->desc.bEndpointAddress,
1716 (unsigned long long) xhci_trb_virt_to_dma(
1717 urb_priv->td[i].start_seg,
1718 urb_priv->td[i].first_trb));
1719
1720 for (; i < urb_priv->num_tds; i++) {
1721 td = &urb_priv->td[i];
1722 /* TD can already be on cancelled list if ep halted on it */
1723 if (list_empty(&td->cancelled_td_list)) {
1724 td->cancel_status = TD_DIRTY;
1725 list_add_tail(&td->cancelled_td_list,
1726 &ep->cancelled_td_list);
1727 }
1728 }
1729
1730 /* Queue a stop endpoint command, but only if this is
1731 * the first cancellation to be handled.
1732 */
1733 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1734 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1735 if (!command) {
1736 ret = -ENOMEM;
1737 goto done;
1738 }
1739 ep->ep_state |= EP_STOP_CMD_PENDING;
1740 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1741 ep_index, 0);
1742 xhci_ring_cmd_db(xhci);
1743 }
1744 done:
1745 spin_unlock_irqrestore(&xhci->lock, flags);
1746 return ret;
1747
1748 err_giveback:
1749 if (urb_priv)
1750 xhci_urb_free_priv(urb_priv);
1751 usb_hcd_unlink_urb_from_ep(hcd, urb);
1752 spin_unlock_irqrestore(&xhci->lock, flags);
1753 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1754 return ret;
1755 }
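/*
 * Illustrative sketch, not part of this driver: how a class driver's
 * timeout path typically reaches xhci_urb_dequeue(). usb_unlink_urb()
 * and usb_kill_urb() are the real USB core entry points; the device
 * structure ("my_dev", "bulk_urb") is hypothetical.
 *
 *	static void my_driver_timeout(struct my_dev *dev)
 *	{
 *		// Asynchronous cancel: returns quickly, and the URB's
 *		// completion handler later runs with -ECONNRESET status.
 *		usb_unlink_urb(dev->bulk_urb);
 *
 *		// Or, from process context only: block until the URB's
 *		// completion handler has finished.
 *		usb_kill_urb(dev->bulk_urb);
 *	}
 */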
1756
1757 /* Drop an endpoint from a new bandwidth configuration for this device.
1758 * Only one call to this function is allowed per endpoint before
1759 * check_bandwidth() or reset_bandwidth() must be called.
1760 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1761 * add the endpoint to the schedule with possibly new parameters denoted by a
1762 * different endpoint descriptor in usb_host_endpoint.
1763 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1764 * not allowed.
1765 *
1766 * The USB core will not allow URBs to be queued to an endpoint that is being
1767 * disabled, so there's no need for mutual exclusion to protect
1768 * the xhci->devs[slot_id] structure.
1769 */
1770 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1771 struct usb_host_endpoint *ep)
1772 {
1773 struct xhci_hcd *xhci;
1774 struct xhci_container_ctx *in_ctx, *out_ctx;
1775 struct xhci_input_control_ctx *ctrl_ctx;
1776 unsigned int ep_index;
1777 struct xhci_ep_ctx *ep_ctx;
1778 u32 drop_flag;
1779 u32 new_add_flags, new_drop_flags;
1780 int ret;
1781
1782 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1783 if (ret <= 0)
1784 return ret;
1785 xhci = hcd_to_xhci(hcd);
1786 if (xhci->xhc_state & XHCI_STATE_DYING)
1787 return -ENODEV;
1788
1789 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1790 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1791 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1792 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1793 __func__, drop_flag);
1794 return 0;
1795 }
1796
1797 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1798 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1799 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1800 if (!ctrl_ctx) {
1801 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1802 __func__);
1803 return 0;
1804 }
1805
1806 ep_index = xhci_get_endpoint_index(&ep->desc);
1807 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1808 /* If the HC already knows the endpoint is disabled,
1809 * or the HCD has noted it is disabled, ignore this request
1810 */
1811 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1812 le32_to_cpu(ctrl_ctx->drop_flags) &
1813 xhci_get_endpoint_flag(&ep->desc)) {
1814 /* Do not warn when called after a usb_device_reset */
1815 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1816 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1817 __func__, ep);
1818 return 0;
1819 }
1820
1821 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1822 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1823
1824 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1825 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1826
1827 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1828
1829 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1830
1831 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1832 (unsigned int) ep->desc.bEndpointAddress,
1833 udev->slot_id,
1834 (unsigned int) new_drop_flags,
1835 (unsigned int) new_add_flags);
1836 return 0;
1837 }
1838 EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1839
1840 /* Add an endpoint to a new possible bandwidth configuration for this device.
1841 * Only one call to this function is allowed per endpoint before
1842 * check_bandwidth() or reset_bandwidth() must be called.
1843 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1844 * add the endpoint to the schedule with possibly new parameters denoted by a
1845 * different endpoint descriptor in usb_host_endpoint.
1846 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1847 * not allowed.
1848 *
1849 * The USB core will not allow URBs to be queued to an endpoint until the
1850 * configuration or alt setting is installed in the device, so there's no need
1851 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1852 */
1853 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1854 struct usb_host_endpoint *ep)
1855 {
1856 struct xhci_hcd *xhci;
1857 struct xhci_container_ctx *in_ctx;
1858 unsigned int ep_index;
1859 struct xhci_input_control_ctx *ctrl_ctx;
1860 struct xhci_ep_ctx *ep_ctx;
1861 u32 added_ctxs;
1862 u32 new_add_flags, new_drop_flags;
1863 struct xhci_virt_device *virt_dev;
1864 int ret = 0;
1865
1866 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1867 if (ret <= 0) {
1868 /* So we won't queue a reset ep command for a root hub */
1869 ep->hcpriv = NULL;
1870 return ret;
1871 }
1872 xhci = hcd_to_xhci(hcd);
1873 if (xhci->xhc_state & XHCI_STATE_DYING)
1874 return -ENODEV;
1875
1876 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1877 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1878 /* FIXME when we have to issue an evaluate endpoint command to
1879 * deal with ep0 max packet size changing once we get the
1880 * descriptors
1881 */
1882 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1883 __func__, added_ctxs);
1884 return 0;
1885 }
1886
1887 virt_dev = xhci->devs[udev->slot_id];
1888 in_ctx = virt_dev->in_ctx;
1889 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1890 if (!ctrl_ctx) {
1891 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1892 __func__);
1893 return 0;
1894 }
1895
1896 ep_index = xhci_get_endpoint_index(&ep->desc);
1897 /* If this endpoint is already in use, and the upper layers are trying
1898 * to add it again without dropping it, reject the addition.
1899 */
1900 if (virt_dev->eps[ep_index].ring &&
1901 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1902 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1903 "without dropping it.\n",
1904 (unsigned int) ep->desc.bEndpointAddress);
1905 return -EINVAL;
1906 }
1907
1908 /* If the HCD has already noted the endpoint is enabled,
1909 * ignore this request.
1910 */
1911 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1912 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1913 __func__, ep);
1914 return 0;
1915 }
1916
1917 /*
1918 * Configuration and alternate setting changes must be done in
1919 * process context, not interrupt context (or so the documentation
1920 * for usb_set_interface() and usb_set_configuration() claims).
1921 */
1922 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1923 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1924 __func__, ep->desc.bEndpointAddress);
1925 return -ENOMEM;
1926 }
1927
1928 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1929 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1930
1931 /* If xhci_endpoint_disable() was called for this endpoint, but the
1932 * xHC hasn't been notified yet through the check_bandwidth() call,
1933 * this re-adds a new state for the endpoint from the new endpoint
1934 * descriptors. We must drop and re-add this endpoint, so we leave the
1935 * drop flags alone.
1936 */
1937 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1938
1939 /* Store the usb_device pointer for later use */
1940 ep->hcpriv = udev;
1941
1942 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1943 trace_xhci_add_endpoint(ep_ctx);
1944
1945 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1946 (unsigned int) ep->desc.bEndpointAddress,
1947 udev->slot_id,
1948 (unsigned int) new_drop_flags,
1949 (unsigned int) new_add_flags);
1950 return 0;
1951 }
1952 EXPORT_SYMBOL_GPL(xhci_add_endpoint);
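/*
 * Illustrative sketch, not part of this driver: the order in which the
 * USB core (usb_hcd_alloc_bandwidth()) drives this API when installing
 * a new alt setting. drop_endpoint, add_endpoint, check_bandwidth and
 * reset_bandwidth are the real struct hc_driver hooks; the loops below
 * are schematic.
 *
 *	// drop every endpoint of the old alt setting
 *	for each ep in old_alt:
 *		hcd->driver->drop_endpoint(hcd, udev, ep);
 *	// add every endpoint of the new alt setting
 *	for each ep in new_alt:
 *		hcd->driver->add_endpoint(hcd, udev, ep);
 *	// commit the whole batch in one configure endpoint command
 *	if (hcd->driver->check_bandwidth(hcd, udev))
 *		hcd->driver->reset_bandwidth(hcd, udev);	// roll back
 */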
1953
1954 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1955 {
1956 struct xhci_input_control_ctx *ctrl_ctx;
1957 struct xhci_ep_ctx *ep_ctx;
1958 struct xhci_slot_ctx *slot_ctx;
1959 int i;
1960
1961 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1962 if (!ctrl_ctx) {
1963 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1964 __func__);
1965 return;
1966 }
1967
1968 /* When a device's add flag and drop flag are zero, any subsequent
1969 * configure endpoint command will leave that endpoint's state
1970 * untouched. Make sure we don't leave any old state in the input
1971 * endpoint contexts.
1972 */
1973 ctrl_ctx->drop_flags = 0;
1974 ctrl_ctx->add_flags = 0;
1975 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1976 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1977 /* Endpoint 0 is always valid */
1978 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1979 for (i = 1; i < 31; i++) {
1980 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1981 ep_ctx->ep_info = 0;
1982 ep_ctx->ep_info2 = 0;
1983 ep_ctx->deq = 0;
1984 ep_ctx->tx_info = 0;
1985 }
1986 }
1987
1988 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1989 struct usb_device *udev, u32 *cmd_status)
1990 {
1991 int ret;
1992
1993 switch (*cmd_status) {
1994 case COMP_COMMAND_ABORTED:
1995 case COMP_COMMAND_RING_STOPPED:
1996 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1997 ret = -ETIME;
1998 break;
1999 case COMP_RESOURCE_ERROR:
2000 dev_warn(&udev->dev,
2001 "Not enough host controller resources for new device state.\n");
2002 ret = -ENOMEM;
2003 /* FIXME: can we allocate more resources for the HC? */
2004 break;
2005 case COMP_BANDWIDTH_ERROR:
2006 case COMP_SECONDARY_BANDWIDTH_ERROR:
2007 dev_warn(&udev->dev,
2008 "Not enough bandwidth for new device state.\n");
2009 ret = -ENOSPC;
2010 /* FIXME: can we go back to the old state? */
2011 break;
2012 case COMP_TRB_ERROR:
2013 /* the HCD set up something wrong */
2014 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2015 "add flag = 1, "
2016 "and endpoint is not disabled.\n");
2017 ret = -EINVAL;
2018 break;
2019 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2020 dev_warn(&udev->dev,
2021 "ERROR: Incompatible device for endpoint configure command.\n");
2022 ret = -ENODEV;
2023 break;
2024 case COMP_SUCCESS:
2025 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2026 "Successful Endpoint Configure command");
2027 ret = 0;
2028 break;
2029 default:
2030 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2031 *cmd_status);
2032 ret = -EINVAL;
2033 break;
2034 }
2035 return ret;
2036 }
2037
2038 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2039 struct usb_device *udev, u32 *cmd_status)
2040 {
2041 int ret;
2042
2043 switch (*cmd_status) {
2044 case COMP_COMMAND_ABORTED:
2045 case COMP_COMMAND_RING_STOPPED:
2046 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2047 ret = -ETIME;
2048 break;
2049 case COMP_PARAMETER_ERROR:
2050 dev_warn(&udev->dev,
2051 "WARN: xHCI driver setup invalid evaluate context command.\n");
2052 ret = -EINVAL;
2053 break;
2054 case COMP_SLOT_NOT_ENABLED_ERROR:
2055 dev_warn(&udev->dev,
2056 "WARN: slot not enabled for evaluate context command.\n");
2057 ret = -EINVAL;
2058 break;
2059 case COMP_CONTEXT_STATE_ERROR:
2060 dev_warn(&udev->dev,
2061 "WARN: invalid context state for evaluate context command.\n");
2062 ret = -EINVAL;
2063 break;
2064 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2065 dev_warn(&udev->dev,
2066 "ERROR: Incompatible device for evaluate context command.\n");
2067 ret = -ENODEV;
2068 break;
2069 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2070 /* Max Exit Latency too large error */
2071 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2072 ret = -EINVAL;
2073 break;
2074 case COMP_SUCCESS:
2075 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2076 "Successful evaluate context command");
2077 ret = 0;
2078 break;
2079 default:
2080 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2081 *cmd_status);
2082 ret = -EINVAL;
2083 break;
2084 }
2085 return ret;
2086 }
2087
2088 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2089 struct xhci_input_control_ctx *ctrl_ctx)
2090 {
2091 u32 valid_add_flags;
2092 u32 valid_drop_flags;
2093
2094 /* Ignore the slot flag (bit 0), and the default control endpoint flag
2095 * (bit 1). The default control endpoint is added during the Address
2096 * Device command and is never removed until the slot is disabled.
2097 */
2098 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2099 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2100
2101 /* Use hweight32 to count the number of ones in the add flags, or
2102 * number of endpoints added. Don't count endpoints that are changed
2103 * (both added and dropped).
2104 */
2105 return hweight32(valid_add_flags) -
2106 hweight32(valid_add_flags & valid_drop_flags);
2107 }
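/*
 * Worked example for the arithmetic above (illustrative values): with
 * add_flags = 0x1c (endpoint contexts 2..4 added) and drop_flags = 0x10
 * (context 4 also dropped, i.e. changed), valid_add_flags = 0x7 and
 * valid_drop_flags = 0x4 after the shift, so the result is
 * hweight32(0x7) - hweight32(0x7 & 0x4) = 3 - 1 = 2 newly added
 * endpoints; the changed endpoint is not counted.
 */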
2108
2109 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2110 struct xhci_input_control_ctx *ctrl_ctx)
2111 {
2112 u32 valid_add_flags;
2113 u32 valid_drop_flags;
2114
2115 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2116 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2117
2118 return hweight32(valid_drop_flags) -
2119 hweight32(valid_add_flags & valid_drop_flags);
2120 }
2121
2122 /*
2123 * We need to reserve the new number of endpoints before the configure endpoint
2124 * command completes. We can't subtract the dropped endpoints from the number
2125 * of active endpoints until the command completes because we can oversubscribe
2126 * the host in this case:
2127 *
2128 * - the first configure endpoint command drops more endpoints than it adds
2129 * - a second configure endpoint command that adds more endpoints is queued
2130 * - the first configure endpoint command fails, so the config is unchanged
2131 * - the second command may succeed, even though there aren't enough resources
2132 *
2133 * Must be called with xhci->lock held.
2134 */
2135 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2136 struct xhci_input_control_ctx *ctrl_ctx)
2137 {
2138 u32 added_eps;
2139
2140 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2141 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2142 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2143 "Not enough ep ctxs: "
2144 "%u active, need to add %u, limit is %u.",
2145 xhci->num_active_eps, added_eps,
2146 xhci->limit_active_eps);
2147 return -ENOMEM;
2148 }
2149 xhci->num_active_eps += added_eps;
2150 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2151 "Adding %u ep ctxs, %u now active.", added_eps,
2152 xhci->num_active_eps);
2153 return 0;
2154 }
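/*
 * A minimal sketch (illustrative names and limit) of the reservation
 * protocol implemented by xhci_reserve_host_resources(),
 * xhci_free_host_resources() and xhci_finish_resource_reservation():
 * count added endpoints pessimistically up front, then either roll the
 * reservation back if the command fails or subtract the dropped-only
 * endpoints once it succeeds.
 *
 *	static unsigned int active, limit = 64;
 *
 *	static int reserve(unsigned int added)
 *	{
 *		if (active + added > limit)
 *			return -ENOMEM;
 *		active += added;	// charge the adds immediately
 *		return 0;
 *	}
 *
 *	static void rollback(unsigned int added)  { active -= added; }
 *	static void commit(unsigned int dropped)  { active -= dropped; }
 */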
2155
2156 /*
2157 * The configure endpoint command failed in the xHC for some other reason, so
2158 * we need to revert the resources that the failed configuration would have used.
2159 *
2160 * Must be called with xhci->lock held.
2161 */
2162 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2163 struct xhci_input_control_ctx *ctrl_ctx)
2164 {
2165 u32 num_failed_eps;
2166
2167 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2168 xhci->num_active_eps -= num_failed_eps;
2169 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2170 "Removing %u failed ep ctxs, %u now active.",
2171 num_failed_eps,
2172 xhci->num_active_eps);
2173 }
2174
2175 /*
2176 * Now that the command has completed, clean up the active endpoint count by
2177 * subtracting out the endpoints that were dropped (but not changed).
2178 *
2179 * Must be called with xhci->lock held.
2180 */
2181 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2182 struct xhci_input_control_ctx *ctrl_ctx)
2183 {
2184 u32 num_dropped_eps;
2185
2186 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2187 xhci->num_active_eps -= num_dropped_eps;
2188 if (num_dropped_eps)
2189 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2190 "Removing %u dropped ep ctxs, %u now active.",
2191 num_dropped_eps,
2192 xhci->num_active_eps);
2193 }
2194
2195 static unsigned int xhci_get_block_size(struct usb_device *udev)
2196 {
2197 switch (udev->speed) {
2198 case USB_SPEED_LOW:
2199 case USB_SPEED_FULL:
2200 return FS_BLOCK;
2201 case USB_SPEED_HIGH:
2202 return HS_BLOCK;
2203 case USB_SPEED_SUPER:
2204 case USB_SPEED_SUPER_PLUS:
2205 return SS_BLOCK;
2206 case USB_SPEED_UNKNOWN:
2207 default:
2208 /* Should never happen */
2209 return 1;
2210 }
2211 }
2212
2213 static unsigned int
2214 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2215 {
2216 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2217 return LS_OVERHEAD;
2218 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2219 return FS_OVERHEAD;
2220 return HS_OVERHEAD;
2221 }
2222
2223 /* If we are changing a LS/FS device under a HS hub,
2224 * make sure (if we are activating a new TT) that the HS bus has enough
2225 * bandwidth for this new TT.
2226 */
2227 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2228 struct xhci_virt_device *virt_dev,
2229 int old_active_eps)
2230 {
2231 struct xhci_interval_bw_table *bw_table;
2232 struct xhci_tt_bw_info *tt_info;
2233
2234 /* Find the bandwidth table for the root port this TT is attached to. */
2235 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2236 tt_info = virt_dev->tt_info;
2237 /* If this TT already had active endpoints, the bandwidth for this TT
2238 * has already been added. Removing all periodic endpoints (and thus
2239 * making the TT inactive) will only decrease the bandwidth used.
2240 */
2241 if (old_active_eps)
2242 return 0;
2243 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2244 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2245 return -ENOMEM;
2246 return 0;
2247 }
2248 /* Not sure why we would have no new active endpoints...
2249 *
2250 * Maybe because of an Evaluate Context change for a hub update or a
2251 * control endpoint 0 max packet size change?
2252 * FIXME: skip the bandwidth calculation in that case.
2253 */
2254 return 0;
2255 }
2256
2257 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2258 struct xhci_virt_device *virt_dev)
2259 {
2260 unsigned int bw_reserved;
2261
2262 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2263 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2264 return -ENOMEM;
2265
2266 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2267 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2268 return -ENOMEM;
2269
2270 return 0;
2271 }
2272
2273 /*
2274 * This algorithm is a very conservative estimate of the worst-case scheduling
2275 * scenario for any one interval. The hardware dynamically schedules the
2276 * packets, so we can't tell which microframe could be the limiting factor in
2277 * the bandwidth scheduling. This only takes into account periodic endpoints.
2278 *
2279 * Obviously, we can't solve an NP complete problem to find the minimum worst
2280 * case scenario. Instead, we come up with an estimate that is no less than
2281 * the worst case bandwidth used for any one microframe, but may be an
2282 * over-estimate.
2283 *
2284 * We walk the requirements for each endpoint by interval, starting with the
2285 * smallest interval, and place packets in the schedule where there is only one
2286 * possible way to schedule packets for that interval. In order to simplify
2287 * this algorithm, we record the largest max packet size for each interval, and
2288 * assume all packets will be that size.
2289 *
2290 * For interval 0, we obviously must schedule all packets for each interval.
2291 * The bandwidth for interval 0 is just the amount of data to be transmitted
2292 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2293 * the number of packets).
2294 *
2295 * For interval 1, we have two possible microframes to schedule those packets
2296 * in. For this algorithm, if we can schedule the same number of packets for
2297 * each possible scheduling opportunity (each microframe), we will do so. The
2298 * remaining number of packets will be saved to be transmitted in the gaps in
2299 * the next interval's scheduling sequence.
2300 *
2301 * As we move those remaining packets to be scheduled with interval 2 packets,
2302 * we have to double the number of remaining packets to transmit. This is
2303 * because the intervals are actually powers of 2, and we would be transmitting
2304 * the previous interval's packets twice in this interval. We also have to be
2305 * sure that when we look at the largest max packet size for this interval, we
2306 * also look at the largest max packet size for the remaining packets and take
2307 * the greater of the two.
2308 *
2309 * The algorithm continues to evenly distribute packets in each scheduling
2310 * opportunity, and push the remaining packets out, until we get to the last
2311 * interval. Then those packets and their associated overhead are just added
2312 * to the bandwidth used.
2313 */
2314 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2315 struct xhci_virt_device *virt_dev,
2316 int old_active_eps)
2317 {
2318 unsigned int bw_reserved;
2319 unsigned int max_bandwidth;
2320 unsigned int bw_used;
2321 unsigned int block_size;
2322 struct xhci_interval_bw_table *bw_table;
2323 unsigned int packet_size = 0;
2324 unsigned int overhead = 0;
2325 unsigned int packets_transmitted = 0;
2326 unsigned int packets_remaining = 0;
2327 unsigned int i;
2328
2329 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2330 return xhci_check_ss_bw(xhci, virt_dev);
2331
2332 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2333 max_bandwidth = HS_BW_LIMIT;
2334 /* Convert percent of bus BW reserved to blocks reserved */
2335 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2336 } else {
2337 max_bandwidth = FS_BW_LIMIT;
2338 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2339 }
2340
2341 bw_table = virt_dev->bw_table;
2342 /* We need to translate the max packet size and max ESIT payloads into
2343 * the units the hardware uses.
2344 */
2345 block_size = xhci_get_block_size(virt_dev->udev);
2346
2347 /* If we are manipulating a LS/FS device under a HS hub, double check
2348 * that the HS bus has enough bandwidth if we are activating a new TT.
2349 */
2350 if (virt_dev->tt_info) {
2351 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2352 "Recalculating BW for rootport %u",
2353 virt_dev->real_port);
2354 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2355 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2356 "newly activated TT.\n");
2357 return -ENOMEM;
2358 }
2359 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2360 "Recalculating BW for TT slot %u port %u",
2361 virt_dev->tt_info->slot_id,
2362 virt_dev->tt_info->ttport);
2363 } else {
2364 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2365 "Recalculating BW for rootport %u",
2366 virt_dev->real_port);
2367 }
2368
2369 /* Add in how much bandwidth will be used for interval zero, or the
2370 * rounded max ESIT payload + number of packets * largest overhead.
2371 */
2372 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2373 bw_table->interval_bw[0].num_packets *
2374 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2375
2376 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2377 unsigned int bw_added;
2378 unsigned int largest_mps;
2379 unsigned int interval_overhead;
2380
2381 /*
2382 * How many packets could we transmit in this interval?
2383 * If packets didn't fit in the previous interval, we will need
2384 * to transmit that many packets twice within this interval.
2385 */
2386 packets_remaining = 2 * packets_remaining +
2387 bw_table->interval_bw[i].num_packets;
2388
2389 /* Find the largest max packet size of this or the previous
2390 * interval.
2391 */
2392 if (list_empty(&bw_table->interval_bw[i].endpoints))
2393 largest_mps = 0;
2394 else {
2395 struct xhci_virt_ep *virt_ep;
2396 struct list_head *ep_entry;
2397
2398 ep_entry = bw_table->interval_bw[i].endpoints.next;
2399 virt_ep = list_entry(ep_entry,
2400 struct xhci_virt_ep, bw_endpoint_list);
2401 /* Convert to blocks, rounding up */
2402 largest_mps = DIV_ROUND_UP(
2403 virt_ep->bw_info.max_packet_size,
2404 block_size);
2405 }
2406 if (largest_mps > packet_size)
2407 packet_size = largest_mps;
2408
2409 /* Use the larger overhead of this or the previous interval. */
2410 interval_overhead = xhci_get_largest_overhead(
2411 &bw_table->interval_bw[i]);
2412 if (interval_overhead > overhead)
2413 overhead = interval_overhead;
2414
2415 /* How many packets can we evenly distribute across
2416 * (1 << (i + 1)) possible scheduling opportunities?
2417 */
2418 packets_transmitted = packets_remaining >> (i + 1);
2419
2420 /* Add in the bandwidth used for those scheduled packets */
2421 bw_added = packets_transmitted * (overhead + packet_size);
2422
2423 /* How many packets do we have remaining to transmit? */
2424 packets_remaining = packets_remaining % (1 << (i + 1));
2425
2426 /* What largest max packet size should those packets have? */
2427 /* If we've transmitted all packets, don't carry over the
2428 * largest packet size.
2429 */
2430 if (packets_remaining == 0) {
2431 packet_size = 0;
2432 overhead = 0;
2433 } else if (packets_transmitted > 0) {
2434 /* Otherwise if we do have remaining packets, and we've
2435 * scheduled some packets in this interval, take the
2436 * largest max packet size from endpoints with this
2437 * interval.
2438 */
2439 packet_size = largest_mps;
2440 overhead = interval_overhead;
2441 }
2442 /* Otherwise carry over packet_size and overhead from the last
2443 * time we had a remainder.
2444 */
2445 bw_used += bw_added;
2446 if (bw_used > max_bandwidth) {
2447 xhci_warn(xhci, "Not enough bandwidth. "
2448 "Proposed: %u, Max: %u\n",
2449 bw_used, max_bandwidth);
2450 return -ENOMEM;
2451 }
2452 }
2453 /*
2454 * Ok, we know we have some packets left over after even-handedly
2455 * scheduling interval 15. We don't know which microframes they will
2456 * fit into, so we over-schedule and say they will be scheduled every
2457 * microframe.
2458 */
2459 if (packets_remaining > 0)
2460 bw_used += overhead + packet_size;
2461
2462 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2463 unsigned int port_index = virt_dev->real_port - 1;
2464
2465 /* OK, we're manipulating a HS device attached to a
2466 * root port bandwidth domain. Include the number of active TTs
2467 * in the bandwidth used.
2468 */
2469 bw_used += TT_HS_OVERHEAD *
2470 xhci->rh_bw[port_index].num_active_tts;
2471 }
2472
2473 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2474 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2475 "Available: %u " "percent",
2476 bw_used, max_bandwidth, bw_reserved,
2477 (max_bandwidth - bw_used - bw_reserved) * 100 /
2478 max_bandwidth);
2479
2480 bw_used += bw_reserved;
2481 if (bw_used > max_bandwidth) {
2482 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2483 bw_used, max_bandwidth);
2484 return -ENOMEM;
2485 }
2486
2487 bw_table->bw_used = bw_used;
2488 return 0;
2489 }
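/*
 * Worked example of the packing loop above (illustrative numbers):
 * suppose 3 packets are left over from interval 1 and interval 2
 * contributes 1 more. Entering the i = 2 iteration,
 * packets_remaining = 2 * 3 + 1 = 7. With 1 << (2 + 1) = 8 scheduling
 * opportunities, packets_transmitted = 7 >> 3 = 0, so bw_added = 0 and
 * all 7 packets (7 % 8) carry over, doubled again, into interval 3.
 * Packets that never divide out evenly eventually leave the loop in
 * packets_remaining and are charged as the single over-scheduled
 * per-microframe term after the loop.
 */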
2490
2491 static bool xhci_is_async_ep(unsigned int ep_type)
2492 {
2493 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2494 ep_type != ISOC_IN_EP &&
2495 ep_type != INT_IN_EP);
2496 }
2497
2498 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2499 {
2500 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2501 }
2502
2503 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2504 {
2505 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2506
2507 if (ep_bw->ep_interval == 0)
2508 return SS_OVERHEAD_BURST +
2509 (ep_bw->mult * ep_bw->num_packets *
2510 (SS_OVERHEAD + mps));
2511 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2512 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2513 1 << ep_bw->ep_interval);
2514
2515 }
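/*
 * Worked example for the formula above (illustrative values): an
 * interrupt IN endpoint with mult = 1, num_packets = 2 and
 * ep_interval = 3 is serviced once every 1 << 3 = 8 intervals, so it
 * consumes DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 8)
 * blocks per interval, i.e. roughly an eighth of its full per-service
 * cost. An ep_interval of 0 instead pays SS_OVERHEAD_BURST once, plus
 * the per-packet cost for every packet.
 */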
2516
2517 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2518 struct xhci_bw_info *ep_bw,
2519 struct xhci_interval_bw_table *bw_table,
2520 struct usb_device *udev,
2521 struct xhci_virt_ep *virt_ep,
2522 struct xhci_tt_bw_info *tt_info)
2523 {
2524 struct xhci_interval_bw *interval_bw;
2525 int normalized_interval;
2526
2527 if (xhci_is_async_ep(ep_bw->type))
2528 return;
2529
2530 if (udev->speed >= USB_SPEED_SUPER) {
2531 if (xhci_is_sync_in_ep(ep_bw->type))
2532 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2533 xhci_get_ss_bw_consumed(ep_bw);
2534 else
2535 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2536 xhci_get_ss_bw_consumed(ep_bw);
2537 return;
2538 }
2539
2540 /* SuperSpeed endpoints never get added to intervals in the table, so
2541 * this check is only valid for HS/FS/LS devices.
2542 */
2543 if (list_empty(&virt_ep->bw_endpoint_list))
2544 return;
2545 /* For LS/FS devices, we need to translate the interval expressed in
2546 * microframes to frames.
2547 */
2548 if (udev->speed == USB_SPEED_HIGH)
2549 normalized_interval = ep_bw->ep_interval;
2550 else
2551 normalized_interval = ep_bw->ep_interval - 3;
2552
2553 if (normalized_interval == 0)
2554 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2555 interval_bw = &bw_table->interval_bw[normalized_interval];
2556 interval_bw->num_packets -= ep_bw->num_packets;
2557 switch (udev->speed) {
2558 case USB_SPEED_LOW:
2559 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2560 break;
2561 case USB_SPEED_FULL:
2562 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2563 break;
2564 case USB_SPEED_HIGH:
2565 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2566 break;
2567 default:
2568 /* Should never happen because only LS/FS/HS endpoints will get
2569 * added to the endpoint list.
2570 */
2571 return;
2572 }
2573 if (tt_info)
2574 tt_info->active_eps -= 1;
2575 list_del_init(&virt_ep->bw_endpoint_list);
2576 }
2577
2578 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2579 struct xhci_bw_info *ep_bw,
2580 struct xhci_interval_bw_table *bw_table,
2581 struct usb_device *udev,
2582 struct xhci_virt_ep *virt_ep,
2583 struct xhci_tt_bw_info *tt_info)
2584 {
2585 struct xhci_interval_bw *interval_bw;
2586 struct xhci_virt_ep *smaller_ep;
2587 int normalized_interval;
2588
2589 if (xhci_is_async_ep(ep_bw->type))
2590 return;
2591
2592 if (udev->speed >= USB_SPEED_SUPER) {
2593 if (xhci_is_sync_in_ep(ep_bw->type))
2594 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2595 xhci_get_ss_bw_consumed(ep_bw);
2596 else
2597 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2598 xhci_get_ss_bw_consumed(ep_bw);
2599 return;
2600 }
2601
2602 /* For LS/FS devices, we need to translate the interval expressed in
2603 * microframes to frames.
2604 */
2605 if (udev->speed == USB_SPEED_HIGH)
2606 normalized_interval = ep_bw->ep_interval;
2607 else
2608 normalized_interval = ep_bw->ep_interval - 3;
2609
2610 if (normalized_interval == 0)
2611 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2612 interval_bw = &bw_table->interval_bw[normalized_interval];
2613 interval_bw->num_packets += ep_bw->num_packets;
2614 switch (udev->speed) {
2615 case USB_SPEED_LOW:
2616 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2617 break;
2618 case USB_SPEED_FULL:
2619 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2620 break;
2621 case USB_SPEED_HIGH:
2622 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2623 break;
2624 default:
2625 /* Should never happen because only LS/FS/HS endpoints will get
2626 * added to the endpoint list.
2627 */
2628 return;
2629 }
2630
2631 if (tt_info)
2632 tt_info->active_eps += 1;
2633 /* Insert the endpoint into the list, largest max packet size first. */
2634 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2635 bw_endpoint_list) {
2636 if (ep_bw->max_packet_size >=
2637 smaller_ep->bw_info.max_packet_size) {
2638 /* Add the new ep before the smaller endpoint */
2639 list_add_tail(&virt_ep->bw_endpoint_list,
2640 &smaller_ep->bw_endpoint_list);
2641 return;
2642 }
2643 }
2644 /* Add the new endpoint at the end of the list. */
2645 list_add_tail(&virt_ep->bw_endpoint_list,
2646 &interval_bw->endpoints);
2647 }
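/*
 * Note on the ordering above (a sketch of the intended invariant):
 * list_add_tail(new, pos) links "new" immediately before "pos", so
 * stopping at the first endpoint with a smaller max packet size keeps
 * the list sorted in descending order. E.g. inserting an endpoint with
 * max_packet_size 768 into the list {1024, 512} stops at the 512 entry
 * and yields {1024, 768, 512}, which is why xhci_check_bw_table() can
 * take the first list entry as the largest max packet size.
 */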
2648
2649 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2650 struct xhci_virt_device *virt_dev,
2651 int old_active_eps)
2652 {
2653 struct xhci_root_port_bw_info *rh_bw_info;
2654 if (!virt_dev->tt_info)
2655 return;
2656
2657 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2658 if (old_active_eps == 0 &&
2659 virt_dev->tt_info->active_eps != 0) {
2660 rh_bw_info->num_active_tts += 1;
2661 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2662 } else if (old_active_eps != 0 &&
2663 virt_dev->tt_info->active_eps == 0) {
2664 rh_bw_info->num_active_tts -= 1;
2665 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2666 }
2667 }
2668
2669 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2670 struct xhci_virt_device *virt_dev,
2671 struct xhci_container_ctx *in_ctx)
2672 {
2673 struct xhci_bw_info ep_bw_info[31];
2674 int i;
2675 struct xhci_input_control_ctx *ctrl_ctx;
2676 int old_active_eps = 0;
2677
2678 if (virt_dev->tt_info)
2679 old_active_eps = virt_dev->tt_info->active_eps;
2680
2681 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2682 if (!ctrl_ctx) {
2683 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2684 __func__);
2685 return -ENOMEM;
2686 }
2687
2688 for (i = 0; i < 31; i++) {
2689 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2690 continue;
2691
2692 /* Make a copy of the BW info in case we need to revert this */
2693 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2694 sizeof(ep_bw_info[i]));
2695 /* Drop the endpoint from the interval table if the endpoint is
2696 * being dropped or changed.
2697 */
2698 if (EP_IS_DROPPED(ctrl_ctx, i))
2699 xhci_drop_ep_from_interval_table(xhci,
2700 &virt_dev->eps[i].bw_info,
2701 virt_dev->bw_table,
2702 virt_dev->udev,
2703 &virt_dev->eps[i],
2704 virt_dev->tt_info);
2705 }
2706 /* Overwrite the information stored in the endpoints' bw_info */
2707 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2708 for (i = 0; i < 31; i++) {
2709 /* Add any changed or added endpoints to the interval table */
2710 if (EP_IS_ADDED(ctrl_ctx, i))
2711 xhci_add_ep_to_interval_table(xhci,
2712 &virt_dev->eps[i].bw_info,
2713 virt_dev->bw_table,
2714 virt_dev->udev,
2715 &virt_dev->eps[i],
2716 virt_dev->tt_info);
2717 }
2718
2719 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2720 /* Ok, this fits in the bandwidth we have.
2721 * Update the number of active TTs.
2722 */
2723 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2724 return 0;
2725 }
2726
2727 /* We don't have enough bandwidth for this, revert the stored info. */
2728 for (i = 0; i < 31; i++) {
2729 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2730 continue;
2731
2732 /* Drop the new copies of any added or changed endpoints from
2733 * the interval table.
2734 */
2735 if (EP_IS_ADDED(ctrl_ctx, i)) {
2736 xhci_drop_ep_from_interval_table(xhci,
2737 &virt_dev->eps[i].bw_info,
2738 virt_dev->bw_table,
2739 virt_dev->udev,
2740 &virt_dev->eps[i],
2741 virt_dev->tt_info);
2742 }
2743 /* Revert the endpoint back to its old information */
2744 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2745 sizeof(ep_bw_info[i]));
2746 /* Add any changed or dropped endpoints back into the table */
2747 if (EP_IS_DROPPED(ctrl_ctx, i))
2748 xhci_add_ep_to_interval_table(xhci,
2749 &virt_dev->eps[i].bw_info,
2750 virt_dev->bw_table,
2751 virt_dev->udev,
2752 &virt_dev->eps[i],
2753 virt_dev->tt_info);
2754 }
2755 return -ENOMEM;
2756 }
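/*
 * A minimal sketch (with hypothetical apply_changes()/validate()
 * helpers) of the snapshot-and-revert pattern used above: copy each
 * mutable record before the tentative update, and restore from the
 * copy if validation fails, so the bandwidth table is left exactly as
 * it was.
 *
 *	struct xhci_bw_info snapshot[31];
 *
 *	memcpy(snapshot, table, sizeof(snapshot));	// save old state
 *	apply_changes(table);		// tentative drop/update/add
 *	if (validate(table) < 0)	// e.g. xhci_check_bw_table()
 *		memcpy(table, snapshot, sizeof(snapshot));	// revert
 */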
2757
2758
2759 /* Issue a configure endpoint command or evaluate context command
2760 * and wait for it to finish.
2761 */
2762 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2763 struct usb_device *udev,
2764 struct xhci_command *command,
2765 bool ctx_change, bool must_succeed)
2766 {
2767 int ret;
2768 unsigned long flags;
2769 struct xhci_input_control_ctx *ctrl_ctx;
2770 struct xhci_virt_device *virt_dev;
2771 struct xhci_slot_ctx *slot_ctx;
2772
2773 if (!command)
2774 return -EINVAL;
2775
2776 spin_lock_irqsave(&xhci->lock, flags);
2777
2778 if (xhci->xhc_state & XHCI_STATE_DYING) {
2779 spin_unlock_irqrestore(&xhci->lock, flags);
2780 return -ESHUTDOWN;
2781 }
2782
2783 virt_dev = xhci->devs[udev->slot_id];
2784
2785 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2786 if (!ctrl_ctx) {
2787 spin_unlock_irqrestore(&xhci->lock, flags);
2788 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2789 __func__);
2790 return -ENOMEM;
2791 }
2792
2793 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2794 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2795 spin_unlock_irqrestore(&xhci->lock, flags);
2796 xhci_warn(xhci, "Not enough host resources, "
2797 "active endpoint contexts = %u\n",
2798 xhci->num_active_eps);
2799 return -ENOMEM;
2800 }
2801 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2802 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2803 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2804 xhci_free_host_resources(xhci, ctrl_ctx);
2805 spin_unlock_irqrestore(&xhci->lock, flags);
2806 xhci_warn(xhci, "Not enough bandwidth\n");
2807 return -ENOMEM;
2808 }
2809
2810 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2811
2812 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2813 trace_xhci_configure_endpoint(slot_ctx);
2814
2815 if (!ctx_change)
2816 ret = xhci_queue_configure_endpoint(xhci, command,
2817 command->in_ctx->dma,
2818 udev->slot_id, must_succeed);
2819 else
2820 ret = xhci_queue_evaluate_context(xhci, command,
2821 command->in_ctx->dma,
2822 udev->slot_id, must_succeed);
2823 if (ret < 0) {
2824 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2825 xhci_free_host_resources(xhci, ctrl_ctx);
2826 spin_unlock_irqrestore(&xhci->lock, flags);
2827 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2828 "FIXME allocate a new ring segment");
2829 return -ENOMEM;
2830 }
2831 xhci_ring_cmd_db(xhci);
2832 spin_unlock_irqrestore(&xhci->lock, flags);
2833
2834 /* Wait for the configure endpoint command to complete */
2835 wait_for_completion(command->completion);
2836
2837 if (!ctx_change)
2838 ret = xhci_configure_endpoint_result(xhci, udev,
2839 &command->status);
2840 else
2841 ret = xhci_evaluate_context_result(xhci, udev,
2842 &command->status);
2843
2844 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2845 spin_lock_irqsave(&xhci->lock, flags);
2846 /* If the command failed, remove the reserved resources.
2847 * Otherwise, clean up the estimate to include dropped eps.
2848 */
2849 if (ret)
2850 xhci_free_host_resources(xhci, ctrl_ctx);
2851 else
2852 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2853 spin_unlock_irqrestore(&xhci->lock, flags);
2854 }
2855 return ret;
2856 }
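/*
 * Sketch of the queue-then-wait shape used above, which every xHCI
 * command in this file follows (shown schematically):
 *
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	// queue the command TRB, e.g. xhci_queue_configure_endpoint()
 *	xhci_ring_cmd_db(xhci);			// notify the host controller
 *	spin_unlock_irqrestore(&xhci->lock, flags);
 *	wait_for_completion(command->completion); // event handler signals
 *	// then map command->status (a TRB completion code) to an errno
 */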
2857
2858 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2859 struct xhci_virt_device *vdev, int i)
2860 {
2861 struct xhci_virt_ep *ep = &vdev->eps[i];
2862
2863 if (ep->ep_state & EP_HAS_STREAMS) {
2864 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2865 xhci_get_endpoint_address(i));
2866 xhci_free_stream_info(xhci, ep->stream_info);
2867 ep->stream_info = NULL;
2868 ep->ep_state &= ~EP_HAS_STREAMS;
2869 }
2870 }
2871
2872 /* Called after one or more calls to xhci_add_endpoint() or
2873 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2874 * to call xhci_reset_bandwidth().
2875 *
2876 * Since we are in the middle of changing either configuration or
2877 * installing a new alt setting, the USB core won't allow URBs to be
2878 * enqueued for any endpoint on the old config or interface. Nothing
2879 * else should be touching the xhci->devs[slot_id] structure, so we
2880 * don't need to take the xhci->lock for manipulating that.
2881 */
2882 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2883 {
2884 int i;
2885 int ret = 0;
2886 struct xhci_hcd *xhci;
2887 struct xhci_virt_device *virt_dev;
2888 struct xhci_input_control_ctx *ctrl_ctx;
2889 struct xhci_slot_ctx *slot_ctx;
2890 struct xhci_command *command;
2891
2892 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2893 if (ret <= 0)
2894 return ret;
2895 xhci = hcd_to_xhci(hcd);
2896 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2897 (xhci->xhc_state & XHCI_STATE_REMOVING))
2898 return -ENODEV;
2899
2900 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2901 virt_dev = xhci->devs[udev->slot_id];
2902
2903 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2904 if (!command)
2905 return -ENOMEM;
2906
2907 command->in_ctx = virt_dev->in_ctx;
2908
2909 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2910 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2911 if (!ctrl_ctx) {
2912 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2913 __func__);
2914 ret = -ENOMEM;
2915 goto command_cleanup;
2916 }
2917 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2918 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2919 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2920
2921 /* Don't issue the command if there's no endpoints to update. */
2922 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2923 ctrl_ctx->drop_flags == 0) {
2924 ret = 0;
2925 goto command_cleanup;
2926 }
2927 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2928 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2929 for (i = 31; i >= 1; i--) {
2930 __le32 le32 = cpu_to_le32(BIT(i));
2931
2932 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2933 || (ctrl_ctx->add_flags & le32) || i == 1) {
2934 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2935 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2936 break;
2937 }
2938 }
2939
2940 ret = xhci_configure_endpoint(xhci, udev, command,
2941 false, false);
2942 if (ret)
2943 /* Caller should call reset_bandwidth() */
2944 goto command_cleanup;
2945
2946 /* Free any rings that were dropped, but not changed. */
2947 for (i = 1; i < 31; i++) {
2948 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2949 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2950 xhci_free_endpoint_ring(xhci, virt_dev, i);
2951 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2952 }
2953 }
2954 xhci_zero_in_ctx(xhci, virt_dev);
2955 /*
2956 * Install any rings for completely new endpoints or changed endpoints,
2957 * and free any old rings from changed endpoints.
2958 */
2959 for (i = 1; i < 31; i++) {
2960 if (!virt_dev->eps[i].new_ring)
2961 continue;
2962 /* Only free the old ring if it exists.
2963 * It may not if this is the first add of an endpoint.
2964 */
2965 if (virt_dev->eps[i].ring) {
2966 xhci_free_endpoint_ring(xhci, virt_dev, i);
2967 }
2968 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2969 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2970 virt_dev->eps[i].new_ring = NULL;
2971 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
2972 }
2973 command_cleanup:
2974 kfree(command->completion);
2975 kfree(command);
2976
2977 return ret;
2978 }
2979 EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
2980
2981 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2982 {
2983 struct xhci_hcd *xhci;
2984 struct xhci_virt_device *virt_dev;
2985 int i, ret;
2986
2987 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2988 if (ret <= 0)
2989 return;
2990 xhci = hcd_to_xhci(hcd);
2991
2992 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2993 virt_dev = xhci->devs[udev->slot_id];
2994 /* Free any rings allocated for added endpoints */
2995 for (i = 0; i < 31; i++) {
2996 if (virt_dev->eps[i].new_ring) {
2997 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2998 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2999 virt_dev->eps[i].new_ring = NULL;
3000 }
3001 }
3002 xhci_zero_in_ctx(xhci, virt_dev);
3003 }
3004 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3005
3006 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3007 struct xhci_container_ctx *in_ctx,
3008 struct xhci_container_ctx *out_ctx,
3009 struct xhci_input_control_ctx *ctrl_ctx,
3010 u32 add_flags, u32 drop_flags)
3011 {
3012 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3013 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3014 xhci_slot_copy(xhci, in_ctx, out_ctx);
3015 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3016 }
3017
3018 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3019 struct usb_host_endpoint *host_ep)
3020 {
3021 struct xhci_hcd *xhci;
3022 struct xhci_virt_device *vdev;
3023 struct xhci_virt_ep *ep;
3024 struct usb_device *udev;
3025 unsigned long flags;
3026 unsigned int ep_index;
3027
3028 xhci = hcd_to_xhci(hcd);
3029 rescan:
3030 spin_lock_irqsave(&xhci->lock, flags);
3031
3032 udev = (struct usb_device *)host_ep->hcpriv;
3033 if (!udev || !udev->slot_id)
3034 goto done;
3035
3036 vdev = xhci->devs[udev->slot_id];
3037 if (!vdev)
3038 goto done;
3039
3040 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3041 ep = &vdev->eps[ep_index];
3042
3043 /* wait for hub_tt_work to finish clearing hub TT */
3044 if (ep->ep_state & EP_CLEARING_TT) {
3045 spin_unlock_irqrestore(&xhci->lock, flags);
3046 schedule_timeout_uninterruptible(1);
3047 goto rescan;
3048 }
3049
3050 if (ep->ep_state)
3051 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3052 ep->ep_state);
3053 done:
3054 host_ep->hcpriv = NULL;
3055 spin_unlock_irqrestore(&xhci->lock, flags);
3056 }
3057
3058 /*
3059 * Called after usb core issues a clear halt control message.
3060 * The host side of the halt should already be cleared by a reset endpoint
3061 * command issued when the STALL event was received.
3062 *
3063 * The reset endpoint command may only be issued to endpoints in the halted
3064 * state. For software that wishes to reset the data toggle or sequence number
3065 * of an endpoint that isn't in the halted state, this function will issue a
3066 * configure endpoint command with the Drop and Add bits set for the target
3067 * endpoint. Refer to the additional note in xHCI specification section 4.6.8.
3068 */
3069
3070 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3071 struct usb_host_endpoint *host_ep)
3072 {
3073 struct xhci_hcd *xhci;
3074 struct usb_device *udev;
3075 struct xhci_virt_device *vdev;
3076 struct xhci_virt_ep *ep;
3077 struct xhci_input_control_ctx *ctrl_ctx;
3078 struct xhci_command *stop_cmd, *cfg_cmd;
3079 unsigned int ep_index;
3080 unsigned long flags;
3081 u32 ep_flag;
3082 int err;
3083
3084 xhci = hcd_to_xhci(hcd);
3085 if (!host_ep->hcpriv)
3086 return;
3087 udev = (struct usb_device *) host_ep->hcpriv;
3088 vdev = xhci->devs[udev->slot_id];
3089
3090 /*
3091 * vdev may be lost due to xHC restore error and re-initialization
3092 * during S3/S4 resume. A new vdev will be allocated later by
3093 * xhci_discover_or_reset_device()
3094 */
3095 if (!udev->slot_id || !vdev)
3096 return;
3097 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3098 ep = &vdev->eps[ep_index];
3099
3100 /* Bail out if toggle is already being cleared by an endpoint reset */
3101 spin_lock_irqsave(&xhci->lock, flags);
3102 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3103 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3104 spin_unlock_irqrestore(&xhci->lock, flags);
3105 return;
3106 }
3107 spin_unlock_irqrestore(&xhci->lock, flags);
3108 /* Only interrupt and bulk endpoints use data toggle, USB2 spec 5.5.4 onward */
3109 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3110 usb_endpoint_xfer_isoc(&host_ep->desc))
3111 return;
3112
3113 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3114
3115 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3116 return;
3117
3118 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3119 if (!stop_cmd)
3120 return;
3121
3122 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3123 if (!cfg_cmd)
3124 goto cleanup;
3125
3126 spin_lock_irqsave(&xhci->lock, flags);
3127
3128 /* block queuing new trbs and ringing ep doorbell */
3129 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3130
3131 /*
3132 * Make sure endpoint ring is empty before resetting the toggle/seq.
3133 * Driver is required to synchronously cancel all transfer requests.
3134 * Stop the endpoint to force the xHC to update the output context.
3135 */
3136
3137 if (!list_empty(&ep->ring->td_list)) {
3138 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3139 spin_unlock_irqrestore(&xhci->lock, flags);
3140 xhci_free_command(xhci, cfg_cmd);
3141 goto cleanup;
3142 }
3143
3144 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3145 ep_index, 0);
3146 if (err < 0) {
3147 spin_unlock_irqrestore(&xhci->lock, flags);
3148 xhci_free_command(xhci, cfg_cmd);
3149 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3150 __func__, err);
3151 goto cleanup;
3152 }
3153
3154 xhci_ring_cmd_db(xhci);
3155 spin_unlock_irqrestore(&xhci->lock, flags);
3156
3157 wait_for_completion(stop_cmd->completion);
3158
3159 spin_lock_irqsave(&xhci->lock, flags);
3160
3161 /* config ep command clears toggle if add and drop ep flags are set */
3162 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3163 if (!ctrl_ctx) {
3164 spin_unlock_irqrestore(&xhci->lock, flags);
3165 xhci_free_command(xhci, cfg_cmd);
3166 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3167 __func__);
3168 goto cleanup;
3169 }
3170
3171 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3172 ctrl_ctx, ep_flag, ep_flag);
3173 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3174
3175 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3176 udev->slot_id, false);
3177 if (err < 0) {
3178 spin_unlock_irqrestore(&xhci->lock, flags);
3179 xhci_free_command(xhci, cfg_cmd);
3180 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3181 __func__, err);
3182 goto cleanup;
3183 }
3184
3185 xhci_ring_cmd_db(xhci);
3186 spin_unlock_irqrestore(&xhci->lock, flags);
3187
3188 wait_for_completion(cfg_cmd->completion);
3189
3190 xhci_free_command(xhci, cfg_cmd);
3191 cleanup:
3192 xhci_free_command(xhci, stop_cmd);
3193 spin_lock_irqsave(&xhci->lock, flags);
3194 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3195 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3196 spin_unlock_irqrestore(&xhci->lock, flags);
3197 }
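/*
 * Illustrative sketch, not part of this driver: the usual path into
 * xhci_endpoint_reset(). A class driver clears a stalled bulk endpoint
 * with usb_clear_halt() (a real USB core API); after the control
 * message succeeds the core calls usb_reset_endpoint(), which invokes
 * the hc_driver endpoint_reset hook seen above. "ep_num" below is
 * hypothetical.
 *
 *	int ret = usb_clear_halt(udev, usb_rcvbulkpipe(udev, ep_num));
 *	if (ret)
 *		dev_err(&udev->dev, "clear halt failed: %d\n", ret);
 */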
3198
3199 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3200 struct usb_device *udev, struct usb_host_endpoint *ep,
3201 unsigned int slot_id)
3202 {
3203 int ret;
3204 unsigned int ep_index;
3205 unsigned int ep_state;
3206
3207 if (!ep)
3208 return -EINVAL;
3209 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3210 if (ret <= 0)
3211 return ret ? ret : -EINVAL;
3212 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3213 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3214 " descriptor for ep 0x%x does not support streams\n",
3215 ep->desc.bEndpointAddress);
3216 return -EINVAL;
3217 }
3218
3219 ep_index = xhci_get_endpoint_index(&ep->desc);
3220 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3221 if (ep_state & EP_HAS_STREAMS ||
3222 ep_state & EP_GETTING_STREAMS) {
3223 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3224 "already has streams set up.\n",
3225 ep->desc.bEndpointAddress);
3226 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3227 "dynamic stream context array reallocation.\n");
3228 return -EINVAL;
3229 }
3230 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3231 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3232 "endpoint 0x%x; URBs are pending.\n",
3233 ep->desc.bEndpointAddress);
3234 return -EINVAL;
3235 }
3236 return 0;
3237 }
3238
3239 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3240 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3241 {
3242 unsigned int max_streams;
3243
3244 /* The stream context array size must be a power of two */
3245 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3246 /*
3247 * Find out how many primary stream array entries the host controller
3248 * supports. Later we may use secondary stream arrays (similar to 2nd
3249 * level page entries), but that's an optional feature for xHCI host
3250 * controllers. xHCs must support at least 4 stream IDs.
3251 */
3252 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3253 if (*num_stream_ctxs > max_streams) {
3254 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3255 max_streams);
3256 *num_stream_ctxs = max_streams;
3257 *num_streams = max_streams;
3258 }
3259 }
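/*
 * Worked example for the sizing above (illustrative numbers): a request
 * for 6 streams first becomes a stream context array of
 * roundup_pow_of_two(6) = 8 entries; if HCC_MAX_PSA reports that the
 * host only supports 4 primary stream array entries, both the array
 * size and the usable stream count are clamped down to 4.
 */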
3260
3261 /* Returns an error code if one of the endpoints already has streams.
3262 * This does not change any data structures, it only checks and gathers
3263 * information.
3264 */
3265 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3266 struct usb_device *udev,
3267 struct usb_host_endpoint **eps, unsigned int num_eps,
3268 unsigned int *num_streams, u32 *changed_ep_bitmask)
3269 {
3270 unsigned int max_streams;
3271 unsigned int endpoint_flag;
3272 int i;
3273 int ret;
3274
3275 for (i = 0; i < num_eps; i++) {
3276 ret = xhci_check_streams_endpoint(xhci, udev,
3277 eps[i], udev->slot_id);
3278 if (ret < 0)
3279 return ret;
3280
3281 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3282 if (max_streams < (*num_streams - 1)) {
3283 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3284 eps[i]->desc.bEndpointAddress,
3285 max_streams);
3286 *num_streams = max_streams + 1;
3287 }
3288
3289 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3290 if (*changed_ep_bitmask & endpoint_flag)
3291 return -EINVAL;
3292 *changed_ep_bitmask |= endpoint_flag;
3293 }
3294 return 0;
3295 }
3296
3297 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3298 struct usb_device *udev,
3299 struct usb_host_endpoint **eps, unsigned int num_eps)
3300 {
3301 u32 changed_ep_bitmask = 0;
3302 unsigned int slot_id;
3303 unsigned int ep_index;
3304 unsigned int ep_state;
3305 int i;
3306
3307 slot_id = udev->slot_id;
3308 if (!xhci->devs[slot_id])
3309 return 0;
3310
3311 for (i = 0; i < num_eps; i++) {
3312 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3313 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3314 /* Are streams already being freed for the endpoint? */
3315 if (ep_state & EP_GETTING_NO_STREAMS) {
3316 xhci_warn(xhci, "WARN Can't disable streams for "
3317 "endpoint 0x%x, "
3318 "streams are being disabled already\n",
3319 eps[i]->desc.bEndpointAddress);
3320 return 0;
3321 }
3322 /* Are there actually any streams to free? */
3323 if (!(ep_state & EP_HAS_STREAMS) &&
3324 !(ep_state & EP_GETTING_STREAMS)) {
3325 xhci_warn(xhci, "WARN Can't disable streams for "
3326 "endpoint 0x%x, "
3327 "streams are already disabled!\n",
3328 eps[i]->desc.bEndpointAddress);
3329 xhci_warn(xhci, "WARN xhci_free_streams() called "
3330 "with non-streams endpoint\n");
3331 return 0;
3332 }
3333 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3334 }
3335 return changed_ep_bitmask;
3336 }
3337
3338 /*
3339 * The USB device drivers use this function (through the HCD interface in USB
3340 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3341 * coordinate mass storage command queueing across multiple endpoints (basically
3342 * a stream ID == a task ID).
3343 *
3344 * Setting up streams involves allocating the same size stream context array
3345 * for each endpoint and issuing a configure endpoint command for all endpoints.
3346 *
3347 * Don't allow the call to succeed if one endpoint only supports one stream
3348 * (which means it doesn't support streams at all).
3349 *
3350 * Drivers may get fewer stream IDs than they asked for, if the host controller
3351 * hardware or endpoints claim they can't support the number of requested
3352 * stream IDs.
3353 */
3354 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3355 struct usb_host_endpoint **eps, unsigned int num_eps,
3356 unsigned int num_streams, gfp_t mem_flags)
3357 {
3358 int i, ret;
3359 struct xhci_hcd *xhci;
3360 struct xhci_virt_device *vdev;
3361 struct xhci_command *config_cmd;
3362 struct xhci_input_control_ctx *ctrl_ctx;
3363 unsigned int ep_index;
3364 unsigned int num_stream_ctxs;
3365 unsigned int max_packet;
3366 unsigned long flags;
3367 u32 changed_ep_bitmask = 0;
3368
3369 if (!eps)
3370 return -EINVAL;
3371
3372 /* Add one to the number of streams requested to account for
3373 * stream 0 that is reserved for xHCI usage.
3374 */
3375 num_streams += 1;
3376 xhci = hcd_to_xhci(hcd);
3377 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3378 num_streams);
3379
3380 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3381 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3382 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3383 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3384 return -ENOSYS;
3385 }
3386
3387 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3388 if (!config_cmd)
3389 return -ENOMEM;
3390
3391 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3392 if (!ctrl_ctx) {
3393 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3394 __func__);
3395 xhci_free_command(xhci, config_cmd);
3396 return -ENOMEM;
3397 }
3398
3399 /* Check to make sure none of the endpoints are already configured for
3400 * streams. While we're at it, find the maximum number of streams that
3401 * all the endpoints will support and check for duplicate endpoints.
3402 */
3403 spin_lock_irqsave(&xhci->lock, flags);
3404 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3405 num_eps, &num_streams, &changed_ep_bitmask);
3406 if (ret < 0) {
3407 xhci_free_command(xhci, config_cmd);
3408 spin_unlock_irqrestore(&xhci->lock, flags);
3409 return ret;
3410 }
3411 if (num_streams <= 1) {
3412 xhci_warn(xhci, "WARN: endpoints can't handle "
3413 "more than one stream.\n");
3414 xhci_free_command(xhci, config_cmd);
3415 spin_unlock_irqrestore(&xhci->lock, flags);
3416 return -EINVAL;
3417 }
3418 vdev = xhci->devs[udev->slot_id];
3419 /* Mark each endpoint as being in transition, so
3420 * xhci_urb_enqueue() will reject all URBs.
3421 */
3422 for (i = 0; i < num_eps; i++) {
3423 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3424 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3425 }
3426 spin_unlock_irqrestore(&xhci->lock, flags);
3427
3428 /* Setup internal data structures and allocate HW data structures for
3429 * streams (but don't install the HW structures in the input context
3430 * until we're sure all memory allocation succeeded).
3431 */
3432 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3433 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3434 num_stream_ctxs, num_streams);
3435
3436 for (i = 0; i < num_eps; i++) {
3437 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3438 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3439 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3440 num_stream_ctxs,
3441 num_streams,
3442 max_packet, mem_flags);
3443 if (!vdev->eps[ep_index].stream_info)
3444 goto cleanup;
3445 /* Set maxPstreams in endpoint context and update deq ptr to
3446 * point to stream context array. FIXME
3447 */
3448 }
3449
3450 /* Set up the input context for a configure endpoint command. */
3451 for (i = 0; i < num_eps; i++) {
3452 struct xhci_ep_ctx *ep_ctx;
3453
3454 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3455 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3456
3457 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3458 vdev->out_ctx, ep_index);
3459 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3460 vdev->eps[ep_index].stream_info);
3461 }
3462 /* Tell the HW to drop its old copy of the endpoint context info
3463 * and add the updated copy from the input context.
3464 */
3465 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3466 vdev->out_ctx, ctrl_ctx,
3467 changed_ep_bitmask, changed_ep_bitmask);
3468
3469 /* Issue and wait for the configure endpoint command */
3470 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3471 false, false);
3472
3473 /* xHC rejected the configure endpoint command for some reason, so we
3474 * leave the old ring intact and free our internal streams data
3475 * structure.
3476 */
3477 if (ret < 0)
3478 goto cleanup;
3479
3480 spin_lock_irqsave(&xhci->lock, flags);
3481 for (i = 0; i < num_eps; i++) {
3482 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3483 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3484 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3485 udev->slot_id, ep_index);
3486 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3487 }
3488 xhci_free_command(xhci, config_cmd);
3489 spin_unlock_irqrestore(&xhci->lock, flags);
3490
3491 for (i = 0; i < num_eps; i++) {
3492 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3493 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3494 }
3495 /* Subtract 1 for stream 0, which drivers can't use */
3496 return num_streams - 1;
3497
3498 cleanup:
3499 /* If it didn't work, free the streams! */
3500 for (i = 0; i < num_eps; i++) {
3501 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3502 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3503 vdev->eps[ep_index].stream_info = NULL;
3504 /* FIXME Unset maxPstreams in endpoint context and
3505 * update deq ptr to point to the normal endpoint ring.
3506 */
3507 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3508 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3509 xhci_endpoint_zero(xhci, vdev, eps[i]);
3510 }
3511 xhci_free_command(xhci, config_cmd);
3512 return -ENOMEM;
3513 }
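
/*
 * Editor's example (illustrative sketch, not part of this driver): class
 * drivers do not call xhci_alloc_streams() directly; they go through the
 * USB core, which reaches it via the HCD's alloc_streams hook. The
 * wrapper function below is hypothetical; usb_alloc_streams() is the
 * real core API.
 */
#if 0
static int example_request_streams(struct usb_interface *intf,
				   struct usb_host_endpoint **eps,
				   unsigned int num_eps)
{
	/* Ask for 16 usable stream IDs; the core may grant fewer. */
	int streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);

	if (streams < 0)
		return streams;
	/* Stream IDs 1..streams may now be used in urb->stream_id. */
	return streams;
}
#endif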
3514
3515 /* Transition the endpoint from using streams to being a "normal" endpoint
3516 * without streams.
3517 *
3518 * Modify the endpoint context state, submit a configure endpoint command,
3519 * and free all endpoint rings for streams if that completes successfully.
3520 */
3521 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3522 struct usb_host_endpoint **eps, unsigned int num_eps,
3523 gfp_t mem_flags)
3524 {
3525 int i, ret;
3526 struct xhci_hcd *xhci;
3527 struct xhci_virt_device *vdev;
3528 struct xhci_command *command;
3529 struct xhci_input_control_ctx *ctrl_ctx;
3530 unsigned int ep_index;
3531 unsigned long flags;
3532 u32 changed_ep_bitmask;
3533
3534 xhci = hcd_to_xhci(hcd);
3535 vdev = xhci->devs[udev->slot_id];
3536
3537 /* Set up a configure endpoint command to remove the streams rings */
3538 spin_lock_irqsave(&xhci->lock, flags);
3539 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3540 udev, eps, num_eps);
3541 if (changed_ep_bitmask == 0) {
3542 spin_unlock_irqrestore(&xhci->lock, flags);
3543 return -EINVAL;
3544 }
3545
3546 /* Use the xhci_command structure from the first endpoint. We may have
3547 * allocated too many, but the driver may call xhci_free_streams() for
3548 * each endpoint it grouped into one call to xhci_alloc_streams().
3549 */
3550 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3551 command = vdev->eps[ep_index].stream_info->free_streams_command;
3552 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3553 if (!ctrl_ctx) {
3554 spin_unlock_irqrestore(&xhci->lock, flags);
3555 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3556 __func__);
3557 return -EINVAL;
3558 }
3559
3560 for (i = 0; i < num_eps; i++) {
3561 struct xhci_ep_ctx *ep_ctx;
3562
3563 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3564 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3565 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3566 EP_GETTING_NO_STREAMS;
3567
3568 xhci_endpoint_copy(xhci, command->in_ctx,
3569 vdev->out_ctx, ep_index);
3570 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3571 &vdev->eps[ep_index]);
3572 }
3573 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3574 vdev->out_ctx, ctrl_ctx,
3575 changed_ep_bitmask, changed_ep_bitmask);
3576 spin_unlock_irqrestore(&xhci->lock, flags);
3577
3578 /* Issue and wait for the configure endpoint command,
3579 * which must succeed.
3580 */
3581 ret = xhci_configure_endpoint(xhci, udev, command,
3582 false, true);
3583
3584 /* xHC rejected the configure endpoint command for some reason, so we
3585 * leave the streams rings intact.
3586 */
3587 if (ret < 0)
3588 return ret;
3589
3590 spin_lock_irqsave(&xhci->lock, flags);
3591 for (i = 0; i < num_eps; i++) {
3592 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3593 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3594 vdev->eps[ep_index].stream_info = NULL;
3595 /* FIXME Unset maxPstreams in endpoint context and
3596 * update deq ptr to point to the normal endpoint ring.
3597 */
3598 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3599 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3600 }
3601 spin_unlock_irqrestore(&xhci->lock, flags);
3602
3603 return 0;
3604 }
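
/*
 * Editor's example (illustrative sketch): the matching teardown from a
 * class driver goes through usb_free_streams(), which reaches this
 * function via the HCD's free_streams hook. The wrapper is hypothetical.
 */
#if 0
static void example_release_streams(struct usb_interface *intf,
				    struct usb_host_endpoint **eps,
				    unsigned int num_eps)
{
	/* Must pass the same endpoint group used in usb_alloc_streams(). */
	usb_free_streams(intf, eps, num_eps, GFP_NOIO);
}
#endif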
3605
3606 /*
3607 * Deletes endpoint resources for endpoints that were active before a Reset
3608 * Device command, or a Disable Slot command. The Reset Device command leaves
3609 * the control endpoint intact, whereas the Disable Slot command deletes it.
3610 *
3611 * Must be called with xhci->lock held.
3612 */
3613 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3614 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3615 {
3616 int i;
3617 unsigned int num_dropped_eps = 0;
3618 unsigned int drop_flags = 0;
3619
3620 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3621 if (virt_dev->eps[i].ring) {
3622 drop_flags |= 1 << i;
3623 num_dropped_eps++;
3624 }
3625 }
3626 xhci->num_active_eps -= num_dropped_eps;
3627 if (num_dropped_eps)
3628 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3629 "Dropped %u ep ctxs, flags = 0x%x, "
3630 "%u now active.",
3631 num_dropped_eps, drop_flags,
3632 xhci->num_active_eps);
3633 }
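
/*
 * Editor's worked example (hypothetical numbers): if only endpoint
 * indices 1 and 4 still have rings, drop_flags ends up as
 * (1 << 1) | (1 << 4) = 0x12 and num_active_eps drops by 2. Index 0
 * (the default control endpoint) is skipped unless drop_control_ep is
 * true, matching Disable Slot semantics.
 */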
3634
3635 /*
3636 * This submits a Reset Device Command, which will set the device state to 0,
3637 * set the device address to 0, and disable all the endpoints except the default
3638 * control endpoint. The USB core should come back and call
3639 * xhci_address_device(), and then re-set up the configuration. If this is
3640 * called because of a usb_reset_and_verify_device(), then the old alternate
3641 * settings will be re-installed through the normal bandwidth allocation
3642 * functions.
3643 *
3644 * Wait for the Reset Device command to finish. Remove all structures
3645 * associated with the endpoints that were disabled. Clear the input device
3646 * structure? Reset the control endpoint 0 max packet size?
3647 *
3648 * If the virt_dev to be reset does not exist or does not match the udev,
3649 * it means the device is lost, possibly due to the xHC restore error and
3650 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3651 * re-allocate the device.
3652 */
3653 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3654 struct usb_device *udev)
3655 {
3656 int ret, i;
3657 unsigned long flags;
3658 struct xhci_hcd *xhci;
3659 unsigned int slot_id;
3660 struct xhci_virt_device *virt_dev;
3661 struct xhci_command *reset_device_cmd;
3662 struct xhci_slot_ctx *slot_ctx;
3663 int old_active_eps = 0;
3664
3665 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3666 if (ret <= 0)
3667 return ret;
3668 xhci = hcd_to_xhci(hcd);
3669 slot_id = udev->slot_id;
3670 virt_dev = xhci->devs[slot_id];
3671 if (!virt_dev) {
3672 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3673 "not exist. Re-allocate the device\n", slot_id);
3674 ret = xhci_alloc_dev(hcd, udev);
3675 if (ret == 1)
3676 return 0;
3677 else
3678 return -EINVAL;
3679 }
3680
3681 if (virt_dev->tt_info)
3682 old_active_eps = virt_dev->tt_info->active_eps;
3683
3684 if (virt_dev->udev != udev) {
3685 /* If the virt_dev and the udev do not match, this virt_dev
3686 * may belong to another udev.
3687 * Re-allocate the device.
3688 */
3689 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3690 "not match the udev. Re-allocate the device\n",
3691 slot_id);
3692 ret = xhci_alloc_dev(hcd, udev);
3693 if (ret == 1)
3694 return 0;
3695 else
3696 return -EINVAL;
3697 }
3698
3699 /* If device is not setup, there is no point in resetting it */
3700 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3701 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3702 SLOT_STATE_DISABLED)
3703 return 0;
3704
3705 trace_xhci_discover_or_reset_device(slot_ctx);
3706
3707 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3708 /* Allocate the command structure that holds the struct completion.
3709 * Assume we're in process context, since the normal device reset
3710 * process has to wait for the device anyway. Storage devices are
3711 * reset as part of error handling, so use GFP_NOIO instead of
3712 * GFP_KERNEL.
3713 */
3714 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3715 if (!reset_device_cmd) {
3716 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3717 return -ENOMEM;
3718 }
3719
3720 /* Attempt to submit the Reset Device command to the command ring */
3721 spin_lock_irqsave(&xhci->lock, flags);
3722
3723 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3724 if (ret) {
3725 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3726 spin_unlock_irqrestore(&xhci->lock, flags);
3727 goto command_cleanup;
3728 }
3729 xhci_ring_cmd_db(xhci);
3730 spin_unlock_irqrestore(&xhci->lock, flags);
3731
3732 /* Wait for the Reset Device command to finish */
3733 wait_for_completion(reset_device_cmd->completion);
3734
3735 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3736 * unless we tried to reset a slot ID that wasn't enabled,
3737 * or the device wasn't in the addressed or configured state.
3738 */
3739 ret = reset_device_cmd->status;
3740 switch (ret) {
3741 case COMP_COMMAND_ABORTED:
3742 case COMP_COMMAND_RING_STOPPED:
3743 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3744 ret = -ETIME;
3745 goto command_cleanup;
3746 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3747 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3748 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3749 slot_id,
3750 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3751 xhci_dbg(xhci, "Not freeing device rings.\n");
3752 /* Don't treat this as an error. May change my mind later. */
3753 ret = 0;
3754 goto command_cleanup;
3755 case COMP_SUCCESS:
3756 xhci_dbg(xhci, "Successful reset device command.\n");
3757 break;
3758 default:
3759 if (xhci_is_vendor_info_code(xhci, ret))
3760 break;
3761 xhci_warn(xhci, "Unknown completion code %u for "
3762 "reset device command.\n", ret);
3763 ret = -EINVAL;
3764 goto command_cleanup;
3765 }
3766
3767 /* Free up host controller endpoint resources */
3768 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3769 spin_lock_irqsave(&xhci->lock, flags);
3770 /* Don't delete the default control endpoint resources */
3771 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3772 spin_unlock_irqrestore(&xhci->lock, flags);
3773 }
3774
3775 /* Everything but endpoint 0 is disabled, so free the rings. */
3776 for (i = 1; i < 31; i++) {
3777 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3778
3779 if (ep->ep_state & EP_HAS_STREAMS) {
3780 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3781 xhci_get_endpoint_address(i));
3782 xhci_free_stream_info(xhci, ep->stream_info);
3783 ep->stream_info = NULL;
3784 ep->ep_state &= ~EP_HAS_STREAMS;
3785 }
3786
3787 if (ep->ring) {
3788 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3789 xhci_free_endpoint_ring(xhci, virt_dev, i);
3790 }
3791 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3792 xhci_drop_ep_from_interval_table(xhci,
3793 &virt_dev->eps[i].bw_info,
3794 virt_dev->bw_table,
3795 udev,
3796 &virt_dev->eps[i],
3797 virt_dev->tt_info);
3798 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3799 }
3800 /* If necessary, update the number of active TTs on this root port */
3801 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3802 virt_dev->flags = 0;
3803 ret = 0;
3804
3805 command_cleanup:
3806 xhci_free_command(xhci, reset_device_cmd);
3807 return ret;
3808 }
3809
3810 /*
3811 * At this point, the struct usb_device is about to go away, the device has
3812 * disconnected, and all traffic has been stopped and the endpoints have been
3813 * disabled. Free any HC data structures associated with that device.
3814 */
3815 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3816 {
3817 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3818 struct xhci_virt_device *virt_dev;
3819 struct xhci_slot_ctx *slot_ctx;
3820 unsigned long flags;
3821 int i, ret;
3822
3823 /*
3824 * We called pm_runtime_get_noresume when the device was attached.
3825 * Decrement the counter here to allow controller to runtime suspend
3826 * if no devices remain.
3827 */
3828 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3829 pm_runtime_put_noidle(hcd->self.controller);
3830
3831 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3832 /* If the host is halted due to driver unload, we still need to free the
3833 * device.
3834 */
3835 if (ret <= 0 && ret != -ENODEV)
3836 return;
3837
3838 virt_dev = xhci->devs[udev->slot_id];
3839 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3840 trace_xhci_free_dev(slot_ctx);
3841
3842 /* Stop any wayward timer functions (which may grab the lock) */
3843 for (i = 0; i < 31; i++)
3844 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3845 virt_dev->udev = NULL;
3846 xhci_disable_slot(xhci, udev->slot_id);
3847
3848 spin_lock_irqsave(&xhci->lock, flags);
3849 xhci_free_virt_device(xhci, udev->slot_id);
3850 spin_unlock_irqrestore(&xhci->lock, flags);
3851
3852 }
3853
3854 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3855 {
3856 struct xhci_command *command;
3857 unsigned long flags;
3858 u32 state;
3859 int ret;
3860
3861 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3862 if (!command)
3863 return -ENOMEM;
3864
3865 xhci_debugfs_remove_slot(xhci, slot_id);
3866
3867 spin_lock_irqsave(&xhci->lock, flags);
3868 /* Don't disable the slot if the host controller is dead. */
3869 state = readl(&xhci->op_regs->status);
3870 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3871 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3872 spin_unlock_irqrestore(&xhci->lock, flags);
3873 kfree(command);
3874 return -ENODEV;
3875 }
3876
3877 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3878 slot_id);
3879 if (ret) {
3880 spin_unlock_irqrestore(&xhci->lock, flags);
3881 kfree(command);
3882 return ret;
3883 }
3884 xhci_ring_cmd_db(xhci);
3885 spin_unlock_irqrestore(&xhci->lock, flags);
3886
3887 wait_for_completion(command->completion);
3888
3889 if (command->status != COMP_SUCCESS)
3890 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
3891 slot_id, command->status);
3892
3893 xhci_free_command(xhci, command);
3894
3895 return 0;
3896 }
3897
3898 /*
3899 * Checks if we have enough host controller resources for the default control
3900 * endpoint.
3901 *
3902 * Must be called with xhci->lock held.
3903 */
3904 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3905 {
3906 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3907 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3908 "Not enough ep ctxs: "
3909 "%u active, need to add 1, limit is %u.",
3910 xhci->num_active_eps, xhci->limit_active_eps);
3911 return -ENOMEM;
3912 }
3913 xhci->num_active_eps += 1;
3914 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3915 "Adding 1 ep ctx, %u now active.",
3916 xhci->num_active_eps);
3917 return 0;
3918 }
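
/*
 * Editor's worked example: this only matters on hosts with
 * XHCI_EP_LIMIT_QUIRK (e.g. Intel Panther Point, where limit_active_eps
 * is 64). With 64 endpoint contexts already active, reserving one more
 * for a new device's default control endpoint fails with -ENOMEM and
 * the caller disables the freshly enabled slot again.
 */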
3919
3920
3921 /*
3922 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3923 * timed out, or allocating memory failed. Returns 1 on success.
3924 */
3925 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3926 {
3927 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3928 struct xhci_virt_device *vdev;
3929 struct xhci_slot_ctx *slot_ctx;
3930 unsigned long flags;
3931 int ret, slot_id;
3932 struct xhci_command *command;
3933
3934 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3935 if (!command)
3936 return 0;
3937
3938 spin_lock_irqsave(&xhci->lock, flags);
3939 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3940 if (ret) {
3941 spin_unlock_irqrestore(&xhci->lock, flags);
3942 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3943 xhci_free_command(xhci, command);
3944 return 0;
3945 }
3946 xhci_ring_cmd_db(xhci);
3947 spin_unlock_irqrestore(&xhci->lock, flags);
3948
3949 wait_for_completion(command->completion);
3950 slot_id = command->slot_id;
3951
3952 if (!slot_id || command->status != COMP_SUCCESS) {
3953 xhci_err(xhci, "Error while assigning device slot ID: %s\n",
3954 xhci_trb_comp_code_string(command->status));
3955 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3956 HCS_MAX_SLOTS(
3957 readl(&xhci->cap_regs->hcs_params1)));
3958 xhci_free_command(xhci, command);
3959 return 0;
3960 }
3961
3962 xhci_free_command(xhci, command);
3963
3964 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3965 spin_lock_irqsave(&xhci->lock, flags);
3966 ret = xhci_reserve_host_control_ep_resources(xhci);
3967 if (ret) {
3968 spin_unlock_irqrestore(&xhci->lock, flags);
3969 xhci_warn(xhci, "Not enough host resources, "
3970 "active endpoint contexts = %u\n",
3971 xhci->num_active_eps);
3972 goto disable_slot;
3973 }
3974 spin_unlock_irqrestore(&xhci->lock, flags);
3975 }
3976 /* Use GFP_NOIO, since this function can be called from
3977 * xhci_discover_or_reset_device(), which may be called as part of
3978 * mass storage driver error handling.
3979 */
3980 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3981 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3982 goto disable_slot;
3983 }
3984 vdev = xhci->devs[slot_id];
3985 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
3986 trace_xhci_alloc_dev(slot_ctx);
3987
3988 udev->slot_id = slot_id;
3989
3990 xhci_debugfs_create_slot(xhci, slot_id);
3991
3992 /*
3993 * If resetting upon resume, we can't put the controller into runtime
3994 * suspend if there is a device attached.
3995 */
3996 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3997 pm_runtime_get_noresume(hcd->self.controller);
3998
3999 /* Is this a LS or FS device under a HS hub? */
4000 /* Hub or peripheral? */
4001 return 1;
4002
4003 disable_slot:
4004 xhci_disable_slot(xhci, udev->slot_id);
4005 xhci_free_virt_device(xhci, udev->slot_id);
4006
4007 return 0;
4008 }
4009
4010 /**
4011 * xhci_setup_device - issues an Address Device command to assign a unique
4012 * USB bus address.
4013 * @hcd: USB host controller data structure.
4014 * @udev: USB dev structure representing the connected device.
4015 * @setup: Enum specifying setup mode: address only or with context.
4016 * @timeout_ms: Max wait time (ms) for the command operation to complete.
4017 *
4018 * Return: 0 if successful; otherwise, negative error code.
4019 */
4020 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4021 enum xhci_setup_dev setup, unsigned int timeout_ms)
4022 {
4023 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4024 unsigned long flags;
4025 struct xhci_virt_device *virt_dev;
4026 int ret = 0;
4027 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4028 struct xhci_slot_ctx *slot_ctx;
4029 struct xhci_input_control_ctx *ctrl_ctx;
4030 u64 temp_64;
4031 struct xhci_command *command = NULL;
4032
4033 mutex_lock(&xhci->mutex);
4034
4035 if (xhci->xhc_state) { /* dying, removing or halted */
4036 ret = -ESHUTDOWN;
4037 goto out;
4038 }
4039
4040 if (!udev->slot_id) {
4041 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4042 "Bad Slot ID %d", udev->slot_id);
4043 ret = -EINVAL;
4044 goto out;
4045 }
4046
4047 virt_dev = xhci->devs[udev->slot_id];
4048
4049 if (WARN_ON(!virt_dev)) {
4050 /*
4051 * In plug/unplug torture test with an NEC controller,
4052 * a zero-dereference was observed once due to virt_dev = 0.
4053 * Print useful debug rather than crash if it is observed again!
4054 */
4055 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4056 udev->slot_id);
4057 ret = -EINVAL;
4058 goto out;
4059 }
4060 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4061 trace_xhci_setup_device_slot(slot_ctx);
4062
4063 if (setup == SETUP_CONTEXT_ONLY) {
4064 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4065 SLOT_STATE_DEFAULT) {
4066 xhci_dbg(xhci, "Slot already in default state\n");
4067 goto out;
4068 }
4069 }
4070
4071 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4072 if (!command) {
4073 ret = -ENOMEM;
4074 goto out;
4075 }
4076
4077 command->in_ctx = virt_dev->in_ctx;
4078 command->timeout_ms = timeout_ms;
4079
4080 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4081 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4082 if (!ctrl_ctx) {
4083 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4084 __func__);
4085 ret = -EINVAL;
4086 goto out;
4087 }
4088 /*
4089 * If this is the first Set Address since device plug-in or
4090 * virt_device reallocation after a resume with an xHCI power loss,
4091 * then set up the slot context.
4092 */
4093 if (!slot_ctx->dev_info)
4094 xhci_setup_addressable_virt_dev(xhci, udev);
4095 /* Otherwise, update the control endpoint ring enqueue pointer. */
4096 else
4097 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4098 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4099 ctrl_ctx->drop_flags = 0;
4100
4101 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4102 le32_to_cpu(slot_ctx->dev_info) >> 27);
4103
4104 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4105 spin_lock_irqsave(&xhci->lock, flags);
4106 trace_xhci_setup_device(virt_dev);
4107 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4108 udev->slot_id, setup);
4109 if (ret) {
4110 spin_unlock_irqrestore(&xhci->lock, flags);
4111 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4112 "FIXME: allocate a command ring segment");
4113 goto out;
4114 }
4115 xhci_ring_cmd_db(xhci);
4116 spin_unlock_irqrestore(&xhci->lock, flags);
4117
4118 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4119 wait_for_completion(command->completion);
4120
4121 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4122 * the SetAddress() 'recovery interval' required by USB and aborting the
4123 * command on a timeout."
4124 */
4125 switch (command->status) {
4126 case COMP_COMMAND_ABORTED:
4127 case COMP_COMMAND_RING_STOPPED:
4128 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4129 ret = -ETIME;
4130 break;
4131 case COMP_CONTEXT_STATE_ERROR:
4132 case COMP_SLOT_NOT_ENABLED_ERROR:
4133 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4134 act, udev->slot_id);
4135 ret = -EINVAL;
4136 break;
4137 case COMP_USB_TRANSACTION_ERROR:
4138 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4139
4140 mutex_unlock(&xhci->mutex);
4141 ret = xhci_disable_slot(xhci, udev->slot_id);
4142 xhci_free_virt_device(xhci, udev->slot_id);
4143 if (!ret)
4144 xhci_alloc_dev(hcd, udev);
4145 kfree(command->completion);
4146 kfree(command);
4147 return -EPROTO;
4148 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4149 dev_warn(&udev->dev,
4150 "ERROR: Incompatible device for setup %s command\n", act);
4151 ret = -ENODEV;
4152 break;
4153 case COMP_SUCCESS:
4154 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4155 "Successful setup %s command", act);
4156 break;
4157 default:
4158 xhci_err(xhci,
4159 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4160 act, command->status);
4161 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4162 ret = -EINVAL;
4163 break;
4164 }
4165 if (ret)
4166 goto out;
4167 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4168 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4169 "Op regs DCBAA ptr = %#016llx", temp_64);
4170 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4171 "Slot ID %d dcbaa entry @%p = %#016llx",
4172 udev->slot_id,
4173 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4174 (unsigned long long)
4175 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4176 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4177 "Output Context DMA address = %#08llx",
4178 (unsigned long long)virt_dev->out_ctx->dma);
4179 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4180 le32_to_cpu(slot_ctx->dev_info) >> 27);
4181 /*
4182 * USB core uses address 1 for the roothubs, so we add one to the
4183 * address given back to us by the HC.
4184 */
4185 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4186 le32_to_cpu(slot_ctx->dev_info) >> 27);
4187 /* Zero the input context control for later use */
4188 ctrl_ctx->add_flags = 0;
4189 ctrl_ctx->drop_flags = 0;
4190 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4191 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4192
4193 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4194 "Internal device address = %d",
4195 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4196 out:
4197 mutex_unlock(&xhci->mutex);
4198 if (command) {
4199 kfree(command->completion);
4200 kfree(command);
4201 }
4202 return ret;
4203 }
4204
4205 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
4206 unsigned int timeout_ms)
4207 {
4208 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
4209 }
4210
4211 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4212 {
4213 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
4214 XHCI_CMD_DEFAULT_TIMEOUT);
4215 }
4216
4217 /*
4218 * Translate the roothub port index into the real index in the HW port
4219 * status registers. Calculate the offset between the port's PORTSC
4220 * register and the port status base, then divide by the number of
4221 * registers per port to get the real index. Raw port numbers are 1-based.
4222 */
4223 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4224 {
4225 struct xhci_hub *rhub;
4226
4227 rhub = xhci_get_rhub(hcd);
4228 return rhub->ports[port1 - 1]->hw_portnum + 1;
4229 }
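
/*
 * Editor's worked example (hypothetical numbers): on a host whose USB3
 * roothub ports occupy hardware port registers with hw_portnum 4..7,
 * roothub port1 == 2 indexes ports[1], whose hw_portnum is 5, so the
 * raw (1-based) port number returned is 6.
 */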
4230
4231 /*
4232 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4233 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4234 */
4235 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4236 struct usb_device *udev, u16 max_exit_latency)
4237 {
4238 struct xhci_virt_device *virt_dev;
4239 struct xhci_command *command;
4240 struct xhci_input_control_ctx *ctrl_ctx;
4241 struct xhci_slot_ctx *slot_ctx;
4242 unsigned long flags;
4243 int ret;
4244
4245 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4246 if (!command)
4247 return -ENOMEM;
4248
4249 spin_lock_irqsave(&xhci->lock, flags);
4250
4251 virt_dev = xhci->devs[udev->slot_id];
4252
4253 /*
4254 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
4255 * the xHC was re-initialized. Exit latency will be set later, after
4256 * hub_port_finish_reset() is done and xhci->devs[] is re-allocated
4257 */
4258
4259 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4260 spin_unlock_irqrestore(&xhci->lock, flags);
4261 xhci_free_command(xhci, command);
4262 return 0;
4263 }
4264
4265 /* Attempt to issue an Evaluate Context command to change the MEL. */
4266 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4267 if (!ctrl_ctx) {
4268 spin_unlock_irqrestore(&xhci->lock, flags);
4269 xhci_free_command(xhci, command);
4270 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4271 __func__);
4272 return -ENOMEM;
4273 }
4274
4275 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4276 spin_unlock_irqrestore(&xhci->lock, flags);
4277
4278 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4279 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4280 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4281 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4282 slot_ctx->dev_state = 0;
4283
4284 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4285 "Set up evaluate context for LPM MEL change.");
4286
4287 /* Issue and wait for the evaluate context command. */
4288 ret = xhci_configure_endpoint(xhci, udev, command,
4289 true, true);
4290
4291 if (!ret) {
4292 spin_lock_irqsave(&xhci->lock, flags);
4293 virt_dev->current_mel = max_exit_latency;
4294 spin_unlock_irqrestore(&xhci->lock, flags);
4295 }
4296
4297 xhci_free_command(xhci, command);
4298
4299 return ret;
4300 }
4301
4302 #ifdef CONFIG_PM
4303
4304 /* BESL to HIRD Encoding array for USB2 LPM */
4305 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4306 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4307
4308 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4309 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4310 struct usb_device *udev)
4311 {
4312 int u2del, besl, besl_host;
4313 int besl_device = 0;
4314 u32 field;
4315
4316 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4317 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4318
4319 if (field & USB_BESL_SUPPORT) {
4320 for (besl_host = 0; besl_host < 16; besl_host++) {
4321 if (xhci_besl_encoding[besl_host] >= u2del)
4322 break;
4323 }
4324 /* Use baseline BESL value as default */
4325 if (field & USB_BESL_BASELINE_VALID)
4326 besl_device = USB_GET_BESL_BASELINE(field);
4327 else if (field & USB_BESL_DEEP_VALID)
4328 besl_device = USB_GET_BESL_DEEP(field);
4329 } else {
4330 if (u2del <= 50)
4331 besl_host = 0;
4332 else
4333 besl_host = (u2del - 51) / 75 + 1;
4334 }
4335
4336 besl = besl_host + besl_device;
4337 if (besl > 15)
4338 besl = 15;
4339
4340 return besl;
4341 }
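
/*
 * Editor's worked examples (hypothetical numbers): with u2del = 700 and
 * BESL supported, the first table entry >= 700 is index 6 (1000), so
 * besl_host = 6; a valid baseline BESL of 3 then gives besl = 9. Without
 * BESL support, besl_host = (700 - 51) / 75 + 1 = 9 and besl_device
 * stays 0, so besl = 9 as well. Either way the result is capped at 15.
 */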
4342
4343 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4344 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4345 {
4346 u32 field;
4347 int l1;
4348 int besld = 0;
4349 int hirdm = 0;
4350
4351 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4352
4353 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4354 l1 = udev->l1_params.timeout / 256;
4355
4356 /* device has preferred BESLD */
4357 if (field & USB_BESL_DEEP_VALID) {
4358 besld = USB_GET_BESL_DEEP(field);
4359 hirdm = 1;
4360 }
4361
4362 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4363 }
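
/*
 * Editor's worked example (hypothetical numbers): with an L1 timeout of
 * 512 us, l1 = 512 / 256 = 2. If the device advertises a valid deep BESL
 * of 4, besld = 4 and hirdm = 1, so the returned PORTHLPMC value is
 * PORT_BESLD(4) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1).
 */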
4364
4365 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4366 struct usb_device *udev, int enable)
4367 {
4368 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4369 struct xhci_port **ports;
4370 __le32 __iomem *pm_addr, *hlpm_addr;
4371 u32 pm_val, hlpm_val, field;
4372 unsigned int port_num;
4373 unsigned long flags;
4374 int hird, exit_latency;
4375 int ret;
4376
4377 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4378 return -EPERM;
4379
4380 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4381 !udev->lpm_capable)
4382 return -EPERM;
4383
4384 if (!udev->parent || udev->parent->parent ||
4385 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4386 return -EPERM;
4387
4388 if (udev->usb2_hw_lpm_capable != 1)
4389 return -EPERM;
4390
4391 spin_lock_irqsave(&xhci->lock, flags);
4392
4393 ports = xhci->usb2_rhub.ports;
4394 port_num = udev->portnum - 1;
4395 pm_addr = ports[port_num]->addr + PORTPMSC;
4396 pm_val = readl(pm_addr);
4397 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4398
4399 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4400 enable ? "enable" : "disable", port_num + 1);
4401
4402 if (enable) {
4403 /* Host supports BESL timeout instead of HIRD */
4404 if (udev->usb2_hw_lpm_besl_capable) {
4405 /* if device doesn't have a preferred BESL value use a
4406 * default one which works with mixed HIRD and BESL
4407 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4408 */
4409 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4410 if ((field & USB_BESL_SUPPORT) &&
4411 (field & USB_BESL_BASELINE_VALID))
4412 hird = USB_GET_BESL_BASELINE(field);
4413 else
4414 hird = udev->l1_params.besl;
4415
4416 exit_latency = xhci_besl_encoding[hird];
4417 spin_unlock_irqrestore(&xhci->lock, flags);
4418
4419 ret = xhci_change_max_exit_latency(xhci, udev,
4420 exit_latency);
4421 if (ret < 0)
4422 return ret;
4423 spin_lock_irqsave(&xhci->lock, flags);
4424
4425 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4426 writel(hlpm_val, hlpm_addr);
4427 /* flush write */
4428 readl(hlpm_addr);
4429 } else {
4430 hird = xhci_calculate_hird_besl(xhci, udev);
4431 }
4432
4433 pm_val &= ~PORT_HIRD_MASK;
4434 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4435 writel(pm_val, pm_addr);
4436 pm_val = readl(pm_addr);
4437 pm_val |= PORT_HLE;
4438 writel(pm_val, pm_addr);
4439 /* flush write */
4440 readl(pm_addr);
4441 } else {
4442 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4443 writel(pm_val, pm_addr);
4444 /* flush write */
4445 readl(pm_addr);
4446 if (udev->usb2_hw_lpm_besl_capable) {
4447 spin_unlock_irqrestore(&xhci->lock, flags);
4448 xhci_change_max_exit_latency(xhci, udev, 0);
4449 readl_poll_timeout(ports[port_num]->addr, pm_val,
4450 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4451 100, 10000);
4452 return 0;
4453 }
4454 }
4455
4456 spin_unlock_irqrestore(&xhci->lock, flags);
4457 return 0;
4458 }
4459
4460 /* Check if a USB2 port supports a given extended capability protocol.
4461 * Only USB2 ports' extended protocol capability values are cached.
4462 * Return 1 if the capability is supported.
4463 */
4464 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4465 unsigned capability)
4466 {
4467 u32 port_offset, port_count;
4468 int i;
4469
4470 for (i = 0; i < xhci->num_ext_caps; i++) {
4471 if (xhci->ext_caps[i] & capability) {
4472 /* port offsets start at 1 */
4473 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4474 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4475 if (port >= port_offset &&
4476 port < port_offset + port_count)
4477 return 1;
4478 }
4479 }
4480 return 0;
4481 }
4482
4483 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4484 {
4485 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4486 int portnum = udev->portnum - 1;
4487
4488 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4489 return 0;
4490
4491 /* So far we only support LPM for non-hub devices connected to the root hub */
4492 if (!udev->parent || udev->parent->parent ||
4493 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4494 return 0;
4495
4496 if (xhci->hw_lpm_support == 1 &&
4497 xhci_check_usb2_port_capability(
4498 xhci, portnum, XHCI_HLC)) {
4499 udev->usb2_hw_lpm_capable = 1;
4500 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4501 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4502 if (xhci_check_usb2_port_capability(xhci, portnum,
4503 XHCI_BLC))
4504 udev->usb2_hw_lpm_besl_capable = 1;
4505 }
4506
4507 return 0;
4508 }
4509
4510 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4511
4512 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4513 static unsigned long long xhci_service_interval_to_ns(
4514 struct usb_endpoint_descriptor *desc)
4515 {
4516 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4517 }
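
/*
 * Editor's worked example: bInterval = 4 gives
 * 2^(4 - 1) * 125 us = 1000 us, so this returns 1000000 ns (1 ms).
 */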
4518
4519 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4520 enum usb3_link_state state)
4521 {
4522 unsigned long long sel;
4523 unsigned long long pel;
4524 unsigned int max_sel_pel;
4525 char *state_name;
4526
4527 switch (state) {
4528 case USB3_LPM_U1:
4529 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4530 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4531 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4532 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4533 state_name = "U1";
4534 break;
4535 case USB3_LPM_U2:
4536 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4537 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4538 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4539 state_name = "U2";
4540 break;
4541 default:
4542 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4543 __func__);
4544 return USB3_LPM_DISABLED;
4545 }
4546
4547 if (sel <= max_sel_pel && pel <= max_sel_pel)
4548 return USB3_LPM_DEVICE_INITIATED;
4549
4550 if (sel > max_sel_pel)
4551 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4552 "due to long SEL %llu ms\n",
4553 state_name, sel);
4554 else
4555 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4556 "due to long PEL %llu ms\n",
4557 state_name, pel);
4558 return USB3_LPM_DISABLED;
4559 }
4560
4561 /* The U1 timeout should be the maximum of the following values:
4562 * - For control endpoints, U1 system exit latency (SEL) * 3
4563 * - For bulk endpoints, U1 SEL * 5
4564 * - For interrupt endpoints:
4565 * - Notification EPs, U1 SEL * 3
4566 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4567 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4568 */
4569 static unsigned long long xhci_calculate_intel_u1_timeout(
4570 struct usb_device *udev,
4571 struct usb_endpoint_descriptor *desc)
4572 {
4573 unsigned long long timeout_ns;
4574 int ep_type;
4575 int intr_type;
4576
4577 ep_type = usb_endpoint_type(desc);
4578 switch (ep_type) {
4579 case USB_ENDPOINT_XFER_CONTROL:
4580 timeout_ns = udev->u1_params.sel * 3;
4581 break;
4582 case USB_ENDPOINT_XFER_BULK:
4583 timeout_ns = udev->u1_params.sel * 5;
4584 break;
4585 case USB_ENDPOINT_XFER_INT:
4586 intr_type = usb_endpoint_interrupt_type(desc);
4587 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4588 timeout_ns = udev->u1_params.sel * 3;
4589 break;
4590 }
4591 /* Otherwise the calculation is the same as isoc eps */
4592 fallthrough;
4593 case USB_ENDPOINT_XFER_ISOC:
4594 timeout_ns = xhci_service_interval_to_ns(desc);
4595 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4596 if (timeout_ns < udev->u1_params.sel * 2)
4597 timeout_ns = udev->u1_params.sel * 2;
4598 break;
4599 default:
4600 return 0;
4601 }
4602
4603 return timeout_ns;
4604 }
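
/*
 * Editor's worked example (hypothetical numbers): for a bulk endpoint
 * with u1_params.sel = 2000 ns, the Intel U1 timeout is 5 * 2000 =
 * 10000 ns, which the caller later encodes as 10 (1 us intervals). For
 * an isochronous endpoint with bInterval = 4 (1 ms service interval),
 * 105% of the interval, 1050000 ns, wins over sel * 2.
 */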
4605
4606 /* Returns the hub-encoded U1 timeout value. */
4607 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4608 struct usb_device *udev,
4609 struct usb_endpoint_descriptor *desc)
4610 {
4611 unsigned long long timeout_ns;
4612
4613 /* Prevent U1 if service interval is shorter than U1 exit latency */
4614 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4615 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4616 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4617 return USB3_LPM_DISABLED;
4618 }
4619 }
4620
4621 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4622 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4623 else
4624 timeout_ns = udev->u1_params.sel;
4625
4626 /* The U1 timeout is encoded in 1us intervals.
4627 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4628 */
4629 if (timeout_ns == USB3_LPM_DISABLED)
4630 timeout_ns = 1;
4631 else
4632 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4633
4634 /* If the necessary timeout value is bigger than what we can set in the
4635 * USB 3.0 hub, we have to disable hub-initiated U1.
4636 */
4637 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4638 return timeout_ns;
4639 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4640 "due to long timeout %llu ms\n", timeout_ns);
4641 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4642 }
4643
4644 /* The U2 timeout should be the maximum of:
4645 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4646 * - largest bInterval of any active periodic endpoint (to avoid going
4647 * into lower power link states between intervals).
4648 * - the U2 Exit Latency of the device
4649 */
4650 static unsigned long long xhci_calculate_intel_u2_timeout(
4651 struct usb_device *udev,
4652 struct usb_endpoint_descriptor *desc)
4653 {
4654 unsigned long long timeout_ns;
4655 unsigned long long u2_del_ns;
4656
4657 timeout_ns = 10 * 1000 * 1000;
4658
4659 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4660 (xhci_service_interval_to_ns(desc) > timeout_ns))
4661 timeout_ns = xhci_service_interval_to_ns(desc);
4662
4663 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4664 if (u2_del_ns > timeout_ns)
4665 timeout_ns = u2_del_ns;
4666
4667 return timeout_ns;
4668 }
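
/*
 * Editor's worked example (hypothetical numbers): starting from the
 * 10 ms floor, an interrupt endpoint with bInterval = 8 has a 16 ms
 * service interval, raising the timeout to 16000000 ns. A bU2DevExitLat
 * of 2000 us (2 ms) is smaller, so 16 ms stands; the caller then
 * encodes it as DIV_ROUND_UP(16000000, 256000) = 63 (256 us intervals).
 */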
4669
4670 /* Returns the hub-encoded U2 timeout value. */
4671 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4672 struct usb_device *udev,
4673 struct usb_endpoint_descriptor *desc)
4674 {
4675 unsigned long long timeout_ns;
4676
4677 /* Prevent U2 if service interval is shorter than U2 exit latency */
4678 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4679 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4680 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4681 return USB3_LPM_DISABLED;
4682 }
4683 }
4684
4685 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4686 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4687 else
4688 timeout_ns = udev->u2_params.sel;
4689
4690 /* The U2 timeout is encoded in 256us intervals */
4691 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4692 /* If the necessary timeout value is bigger than what we can set in the
4693 * USB 3.0 hub, we have to disable hub-initiated U2.
4694 */
4695 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4696 return timeout_ns;
4697 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4698 "due to long timeout %llu ms\n", timeout_ns);
4699 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4700 }
4701
4702 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4703 struct usb_device *udev,
4704 struct usb_endpoint_descriptor *desc,
4705 enum usb3_link_state state,
4706 u16 *timeout)
4707 {
4708 if (state == USB3_LPM_U1)
4709 return xhci_calculate_u1_timeout(xhci, udev, desc);
4710 else if (state == USB3_LPM_U2)
4711 return xhci_calculate_u2_timeout(xhci, udev, desc);
4712
4713 return USB3_LPM_DISABLED;
4714 }
4715
4716 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4717 struct usb_device *udev,
4718 struct usb_endpoint_descriptor *desc,
4719 enum usb3_link_state state,
4720 u16 *timeout)
4721 {
4722 u16 alt_timeout;
4723
4724 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4725 desc, state, timeout);
4726
4727 /* If we found we can't enable hub-initiated LPM, and
4728 * the U1 or U2 exit latency was too high to allow
4729 * device-initiated LPM as well, then we will disable LPM
4730 * for this device, so stop searching any further.
4731 */
4732 if (alt_timeout == USB3_LPM_DISABLED) {
4733 *timeout = alt_timeout;
4734 return -E2BIG;
4735 }
4736 if (alt_timeout > *timeout)
4737 *timeout = alt_timeout;
4738 return 0;
4739 }
4740
4741 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4742 struct usb_device *udev,
4743 struct usb_host_interface *alt,
4744 enum usb3_link_state state,
4745 u16 *timeout)
4746 {
4747 int j;
4748
4749 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4750 if (xhci_update_timeout_for_endpoint(xhci, udev,
4751 &alt->endpoint[j].desc, state, timeout))
4752 return -E2BIG;
4753 }
4754 return 0;
4755 }
4756
4757 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4758 struct usb_device *udev,
4759 enum usb3_link_state state)
4760 {
4761 struct usb_device *parent = udev->parent;
4762 int tier = 1; /* roothub is tier1 */
4763
4764 while (parent) {
4765 parent = parent->parent;
4766 tier++;
4767 }
4768
4769 if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
4770 goto fail;
4771 if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
4772 goto fail;
4773
4774 return 0;
4775 fail:
4776 dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
4777 tier);
4778 return -E2BIG;
4779 }
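
/*
 * Editor's worked examples: a device plugged straight into the roothub
 * has parent == roothub, so the loop runs once and tier = 2; that passes
 * both checks. Behind one external hub, tier = 3: allowed on Intel
 * hosts, rejected on Zhaoxin. Behind two cascaded hubs, tier = 4 and
 * even Intel hosts refuse U1/U2.
 */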
4780
4781 /* Returns the U1 or U2 timeout that should be enabled.
4782 * If the tier check or timeout setting functions return with a non-zero exit
4783 * code, that means the timeout value has been finalized and we shouldn't look
4784 * at any more endpoints.
4785 */
4786 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4787 struct usb_device *udev, enum usb3_link_state state)
4788 {
4789 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4790 struct usb_host_config *config;
4791 char *state_name;
4792 int i;
4793 u16 timeout = USB3_LPM_DISABLED;
4794
4795 if (state == USB3_LPM_U1)
4796 state_name = "U1";
4797 else if (state == USB3_LPM_U2)
4798 state_name = "U2";
4799 else {
4800 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4801 state);
4802 return timeout;
4803 }
4804
4805 /* Gather some information about the currently installed configuration
4806 * and alternate interface settings.
4807 */
4808 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4809 state, &timeout))
4810 return timeout;
4811
4812 config = udev->actconfig;
4813 if (!config)
4814 return timeout;
4815
4816 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4817 struct usb_driver *driver;
4818 struct usb_interface *intf = config->interface[i];
4819
4820 if (!intf)
4821 continue;
4822
4823 /* Check if any currently bound drivers want hub-initiated LPM
4824 * disabled.
4825 */
4826 if (intf->dev.driver) {
4827 driver = to_usb_driver(intf->dev.driver);
4828 if (driver && driver->disable_hub_initiated_lpm) {
4829 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4830 state_name, driver->name);
4831 timeout = xhci_get_timeout_no_hub_lpm(udev,
4832 state);
4833 if (timeout == USB3_LPM_DISABLED)
4834 return timeout;
4835 }
4836 }
4837
4838 /* Not sure how this could happen... */
4839 if (!intf->cur_altsetting)
4840 continue;
4841
4842 if (xhci_update_timeout_for_interface(xhci, udev,
4843 intf->cur_altsetting,
4844 state, &timeout))
4845 return timeout;
4846 }
4847 return timeout;
4848 }
4849
4850 static int calculate_max_exit_latency(struct usb_device *udev,
4851 enum usb3_link_state state_changed,
4852 u16 hub_encoded_timeout)
4853 {
4854 unsigned long long u1_mel_us = 0;
4855 unsigned long long u2_mel_us = 0;
4856 unsigned long long mel_us = 0;
4857 bool disabling_u1;
4858 bool disabling_u2;
4859 bool enabling_u1;
4860 bool enabling_u2;
4861
4862 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4863 hub_encoded_timeout == USB3_LPM_DISABLED);
4864 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4865 hub_encoded_timeout == USB3_LPM_DISABLED);
4866
4867 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4868 hub_encoded_timeout != USB3_LPM_DISABLED);
4869 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4870 hub_encoded_timeout != USB3_LPM_DISABLED);
4871
4872 /* If U1 was already enabled and we're not disabling it,
4873 * or we're going to enable U1, account for the U1 max exit latency.
4874 */
4875 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4876 enabling_u1)
4877 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4878 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4879 enabling_u2)
4880 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
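	/*
	 * The mel values above are in nanoseconds, so the DIV_ROUND_UP()
	 * calls convert them to microseconds, rounding up (e.g. a 1500 ns
	 * exit latency becomes 2 us).
	 */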

	mel_us = max(u1_mel_us, u2_mel_us);

	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
			 mel_us);
		return -E2BIG;
	}
	return mel_us;
}

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	struct xhci_port *port;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return USB3_LPM_DISABLED;

	/* If connected to a root port, check that the port can handle LPM */
	if (udev->parent && !udev->parent->parent) {
		port = xhci->usb3_rhub.ports[udev->portnum - 1];
		if (port->lpm_incapable)
			return USB3_LPM_DISABLED;
	}

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				      struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			   struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * Per section 6.2.2, MTT should be 0 for a full speed hub, but it
	 * may already have been set to 1 when the xHCI virtual device was
	 * set up, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
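		/*
		 * tt->think_time is in nanoseconds and comes in multiples
		 * of 666 ns (8 FS bit times), so e.g. 666 ns maps to
		 * encoding 0 and 2664 ns to encoding 3.
		 */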
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(xhci_update_hub_device);

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/*
	 * MFINDEX counts 125 us microframes, so shift right by 3 to get the
	 * 1 ms frame number. (EHCI additionally mods by the periodic
	 * schedule size; xHCI has no such software schedule to wrap around.)
	 */
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	xhci->usb2_rhub.hcd = hcd;
	hcd->speed = HCD_USB2;
	hcd->self.root_hub->speed = USB_SPEED_HIGH;
	/*
	 * The USB 2.0 roothub under xHCI has an integrated TT (rate
	 * matching hub), as opposed to having an OHCI/UHCI companion
	 * controller.
	 */
	hcd->has_tt = 1;
}

static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	unsigned int minor_rev;

	/*
	 * Early xHCI 1.1 spec did not mention that USB 3.1 capable hosts
	 * should return 0x31 for sbrn, or that the minor revision is a
	 * two-digit BCD containing minor and sub-minor numbers. This was
	 * later clarified in xHCI 1.2.
	 *
	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
	 * minor revision set to 0x1 instead of 0x10.
	 */
	if (xhci->usb3_rhub.min_rev == 0x1)
		minor_rev = 1;
	else
		minor_rev = xhci->usb3_rhub.min_rev / 0x10;
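	/*
	 * For example, a compliant USB 3.2 host reports min_rev 0x20,
	 * giving minor_rev 2, while both 0x10 and the quirky 0x1 end up
	 * as minor_rev 1 (USB 3.1).
	 */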

	switch (minor_rev) {
	case 2:
		hcd->speed = HCD_USB32;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->rx_lanes = 2;
		hcd->self.root_hub->tx_lanes = 2;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
		break;
	case 1:
		hcd->speed = HCD_USB31;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
		break;
	}
	xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
		  minor_rev, minor_rev ? "Enhanced " : "");

	xhci->usb3_rhub.hcd = hcd;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device *dev = hcd->self.sysdev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support to build packet from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_hcd_init_usb3_data(xhci, hcd);
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->main_hcd = hcd;
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	/* xhci-plat or xhci-pci might have set max_interrupters already */
	if ((!xhci->max_interrupters) ||
	    xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
		xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	xhci->quirks |= quirks;

	if (get_quirks)
		get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec can give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1, even though the xHCs don't actually
	 * support 64-bit address memory pointers. So this driver clears the
	 * AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
	 * DMA_BIT_MASK(32)) is called below in this xhci_gen_setup().
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64 bits,
	 * if the xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	if (xhci_hcd_is_usb3(hcd))
		xhci_hcd_init_usb3_data(xhci, hcd);
	else
		xhci_hcd_init_usb2_data(xhci, hcd);

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
					  struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}
static const struct hc_driver xhci_hc_driver = {
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
	.hcd_priv_size = sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq = xhci_irq,
	.flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
		 HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset = NULL, /* set in xhci_init_driver() */
	.start = xhci_run,
	.stop = xhci_stop,
	.shutdown = xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma = xhci_map_urb_for_dma,
	.unmap_urb_for_dma = xhci_unmap_urb_for_dma,
	.urb_enqueue = xhci_urb_enqueue,
	.urb_dequeue = xhci_urb_dequeue,
	.alloc_dev = xhci_alloc_dev,
	.free_dev = xhci_free_dev,
	.alloc_streams = xhci_alloc_streams,
	.free_streams = xhci_free_streams,
	.add_endpoint = xhci_add_endpoint,
	.drop_endpoint = xhci_drop_endpoint,
	.endpoint_disable = xhci_endpoint_disable,
	.endpoint_reset = xhci_endpoint_reset,
	.check_bandwidth = xhci_check_bandwidth,
	.reset_bandwidth = xhci_reset_bandwidth,
	.address_device = xhci_address_device,
	.enable_device = xhci_enable_device,
	.update_hub_device = xhci_update_hub_device,
	.reset_device = xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number = xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control = xhci_hub_control,
	.hub_status_data = xhci_hub_status_data,
	.bus_suspend = xhci_bus_suspend,
	.bus_resume = xhci_bus_resume,
	.get_resuming_ports = xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device = xhci_update_device,
	.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number = xhci_find_raw_port_number,
	.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
};

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
		if (over->add_endpoint)
			drv->add_endpoint = over->add_endpoint;
		if (over->drop_endpoint)
			drv->drop_endpoint = over->drop_endpoint;
		if (over->check_bandwidth)
			drv->check_bandwidth = over->check_bandwidth;
		if (over->reset_bandwidth)
			drv->reset_bandwidth = over->reset_bandwidth;
		if (over->update_hub_device)
			drv->update_hub_device = over->update_hub_device;
		if (over->hub_control)
			drv->hub_control = over->hub_control;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
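
/*
 * Illustrative (hypothetical) use of xhci_init_driver() by a glue driver.
 * The names below are placeholders for this sketch, not an existing driver;
 * real glue drivers (xhci-plat, xhci-pci) follow the same pattern, typically
 * also setting .extra_priv_size for their private data:
 *
 *	static struct hc_driver my_xhci_hc_driver;
 *
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.reset = my_xhci_platform_setup,
 *	};
 *
 *	static int __init my_xhci_driver_init(void)
 *	{
 *		xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
 *		return 0;
 *	}
 *	module_init(my_xhci_driver_init);
 */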

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
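	/*
	 * Each expected size below is written as
	 * <number of 32-bit words> * 32 bits / 8 bits-per-byte; e.g. the
	 * doorbell array is 256 32-bit registers, i.e. 1024 bytes.
	 */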
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();
	xhci_dbc_init();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
	xhci_dbc_exit();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);