// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

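/* Walk @ring's circular segment list once; true if @td starts on @ring */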
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" microseconds
 * have passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

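/*
 * A typical call (a sketch mirroring xhci_halt() below) polls an operational
 * register until the expected bits show up:
 *
 *	ret = xhci_handshake(&xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *
 * which waits up to XHCI_MAX_HALT_USEC microseconds for HCHalted to assert.
 */
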
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

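	/*
	 * The mask clears the interrupt-enable bits in USBCMD; if the host
	 * is still running, also clear the run/stop bit to begin halting.
	 */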
	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* clear state flags, including dying, halted and removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

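/*
 * IMAN.IP (bit 0) is write-1-to-clear, so the ER_IRQ_ENABLE()/ER_IRQ_DISABLE()
 * helpers mask it off before flipping IMAN.IE (bit 1) and writing back.
 */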
static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode. The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers the port by issuing
 * a warm reset if compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * xhci spec), this quirk is needed on systems that have the failing hardware
 * installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

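/*
 * port_status_u0 is a bitmask with one bit per USB3 root port; all ports have
 * been seen in U0 once it equals (1 << num_ports) - 1 (e.g. 0xf for 4 ports).
 */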
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initialize compliance mode recovery data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before host is running with a lock
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupter;
	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set the interrupt modulation register");
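	/*
	 * The hardware counts the moderation interval in 250 ns units, hence
	 * the division below (e.g. an imod_interval of 40000 ns programs 160).
	 */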
	temp = readl(&ir->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &ir->ir_set->irq_control);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupter;

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	if (!ir)
		return;

	ir->s3_erst_size = readl(&ir->ir_set->erst_size);
	ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
	ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
	ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}

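/* Restore the operational and primary interrupter registers saved at suspend */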
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(ir->s3_erst_size, &ir->ir_set->erst_size);
	xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
	xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
	writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
	writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}

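/*
 * The low CMD_RING_RSVD_BITS of the CRCR register are control bits (ring
 * cycle state, command stop/abort, ring running), not address bits, so the
 * dequeue pointer written here must be 64-byte aligned: keep the reserved
 * bits read back from the register, install the upper address bits, and OR
 * the ring's cycle state into bit 0.
 */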
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%llx",
		       (unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't reliably save the command ring pointer across suspend, so
 * we need to re-program it on resume. Unfortunately, the pointer must be
 * 64-byte aligned, because of the reserved bits in the command ring dequeue
 * pointer register. Therefore, we can't just set the dequeue pointer back in
 * the middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
		       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

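	/*
	 * Each segment holds TRBS_PER_SEGMENT - 1 usable TRBs (the last slot
	 * is the link TRB); one more slot is left unused so a full ring can
	 * be told apart from an empty one.
	 */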
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected a termination but never successfully enumerated (trained to U0).
 * Internal wake causes an immediate xHCI wake after suspend. The PORT_CSC
 * write done at enumeration clears this wake state; force one here as well
 * for unconnected ports.
 */

static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the SSS bit of
		 * USBSTS; the driver then polls for BIT(8) to clear, which
		 * never happens, and wrongly concludes that the controller
		 * is not responding and times out. To work around this,
		 * check that the SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Delete the compliance mode recovery timer because the xHCI host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);
9575535b1d5SAndiry Xu
9585535b1d5SAndiry Xu /*
9595535b1d5SAndiry Xu * start xHC (not bus-specific)
9605535b1d5SAndiry Xu *
9615535b1d5SAndiry Xu * This is called when the machine transition from S3/S4 mode.
9625535b1d5SAndiry Xu *
9635535b1d5SAndiry Xu */
xhci_resume(struct xhci_hcd * xhci,pm_message_t msg)9641f7d5520SBasavaraj Natikar int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
9655535b1d5SAndiry Xu {
9661f7d5520SBasavaraj Natikar bool hibernated = (msg.event == PM_EVENT_RESTORE);
967229bc19fSMathias Nyman u32 command, temp = 0;
9685535b1d5SAndiry Xu struct usb_hcd *hcd = xhci_to_hcd(xhci);
969f69e3120SAlan Stern int retval = 0;
97077df9e0bSTony Camuso bool comp_timer_running = false;
971253f588cSMathias Nyman bool pending_portevent = false;
972f83810e0SWesley Cheng bool suspended_usb3_devs = false;
9738b328f80SPuma Hsu bool reinit_xhc = false;
9745535b1d5SAndiry Xu
9759fa733f2SRoger Quadros if (!hcd->state)
9769fa733f2SRoger Quadros return 0;
9779fa733f2SRoger Quadros
978f6ff0ac8SSarah Sharp /* Wait a bit if either of the roothubs need to settle from the
97925985edcSLucas De Marchi * transition into bus suspend.
98020b67cf5SSarah Sharp */
981f6187f42SMathias Nyman
982f6187f42SMathias Nyman if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
983f6187f42SMathias Nyman time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
9845535b1d5SAndiry Xu msleep(100);
9855535b1d5SAndiry Xu
986f69e3120SAlan Stern set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
987873f3236SHeiner Kallweit if (xhci->shared_hcd)
988f69e3120SAlan Stern set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
989f69e3120SAlan Stern
9905535b1d5SAndiry Xu spin_lock_irq(&xhci->lock);
9915535b1d5SAndiry Xu
9928b328f80SPuma Hsu if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
9938b328f80SPuma Hsu reinit_xhc = true;
9948b328f80SPuma Hsu
9958b328f80SPuma Hsu if (!reinit_xhc) {
996a70bcbc3SRick Tseng /*
997a70bcbc3SRick Tseng * Some controllers might lose power during suspend, so wait
998a70bcbc3SRick Tseng * for controller not ready bit to clear, just as in xHC init.
999a70bcbc3SRick Tseng */
1000a70bcbc3SRick Tseng retval = xhci_handshake(&xhci->op_regs->status,
1001a70bcbc3SRick Tseng STS_CNR, 0, 10 * 1000 * 1000);
1002a70bcbc3SRick Tseng if (retval) {
1003a70bcbc3SRick Tseng xhci_warn(xhci, "Controller not ready at resume %d\n",
1004a70bcbc3SRick Tseng retval);
1005a70bcbc3SRick Tseng spin_unlock_irq(&xhci->lock);
1006a70bcbc3SRick Tseng return retval;
1007a70bcbc3SRick Tseng }
10085535b1d5SAndiry Xu /* step 1: restore register */
10095535b1d5SAndiry Xu xhci_restore_registers(xhci);
10105535b1d5SAndiry Xu /* step 2: initialize command ring buffer */
101189821320SSarah Sharp xhci_set_cmd_ring_deq(xhci);
10125535b1d5SAndiry Xu /* step 3: restore state and start state*/
10135535b1d5SAndiry Xu /* step 3: set CRS flag */
1014b0ba9720SXenia Ragiadakou command = readl(&xhci->op_regs->command);
10155535b1d5SAndiry Xu command |= CMD_CRS;
1016204b7793SXenia Ragiadakou writel(command, &xhci->op_regs->command);
1017305886caSAjay Gupta /*
1018305886caSAjay Gupta * Some controllers take up to 55+ ms to complete the controller
1019305886caSAjay Gupta * restore so setting the timeout to 100ms. Xhci specification
1020305886caSAjay Gupta * doesn't mention any timeout value.
1021305886caSAjay Gupta */
1022dc0b177cSLin Wang if (xhci_handshake(&xhci->op_regs->status,
1023305886caSAjay Gupta STS_RESTORE, 0, 100 * 1000)) {
1024622eb783SAndiry Xu xhci_warn(xhci, "WARN: xHC restore state timeout\n");
10255535b1d5SAndiry Xu spin_unlock_irq(&xhci->lock);
10265535b1d5SAndiry Xu return -ETIMEDOUT;
10275535b1d5SAndiry Xu }
10285535b1d5SAndiry Xu }
10295535b1d5SAndiry Xu
10308b328f80SPuma Hsu temp = readl(&xhci->op_regs->status);
103177df9e0bSTony Camuso
10328b328f80SPuma Hsu /* re-initialize the HC on Restore Error, or Host Controller Error */
1033fb2ce178SWesley Cheng if ((temp & (STS_SRE | STS_HCE)) &&
1034fb2ce178SWesley Cheng !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
10358b328f80SPuma Hsu reinit_xhc = true;
1036484d6f7aSMario Limonciello if (!xhci->broken_suspend)
10378b328f80SPuma Hsu xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
10388b328f80SPuma Hsu }
10398b328f80SPuma Hsu
10408b328f80SPuma Hsu if (reinit_xhc) {
104177df9e0bSTony Camuso if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
104277df9e0bSTony Camuso !(xhci_all_ports_seen_u0(xhci))) {
104377df9e0bSTony Camuso del_timer_sync(&xhci->comp_mode_recovery_timer);
10444bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
10454bdfe4c3SXenia Ragiadakou "Compliance Mode Recovery Timer deleted!");
104677df9e0bSTony Camuso }
104777df9e0bSTony Camuso
1048fedd383eSSarah Sharp /* Let the USB core know _both_ roothubs lost power. */
1049fedd383eSSarah Sharp usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1050873f3236SHeiner Kallweit if (xhci->shared_hcd)
1051fedd383eSSarah Sharp usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
10525535b1d5SAndiry Xu
10535535b1d5SAndiry Xu xhci_dbg(xhci, "Stop HCD\n");
10545535b1d5SAndiry Xu xhci_halt(xhci);
105512de0a35SMarc Zyngier xhci_zero_64b_regs(xhci);
105614073ce9SMathias Nyman retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
10575535b1d5SAndiry Xu spin_unlock_irq(&xhci->lock);
105872ae1947SMathias Nyman if (retval)
105972ae1947SMathias Nyman return retval;
10605535b1d5SAndiry Xu
10615535b1d5SAndiry Xu xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1062b0ba9720SXenia Ragiadakou temp = readl(&xhci->op_regs->status);
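/* USBSTS bits are write-1-to-clear; write back only STS_EINT so that it is the only status bit acked here */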
1063d1001ab4SLu Baolu writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
106452dd0483SMathias Nyman xhci_disable_interrupter(xhci->interrupter);
10655535b1d5SAndiry Xu
10665535b1d5SAndiry Xu xhci_dbg(xhci, "cleaning up memory\n");
10675535b1d5SAndiry Xu xhci_mem_cleanup(xhci);
1068d9167671SZhengjun Xing xhci_debugfs_exit(xhci);
10695535b1d5SAndiry Xu xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1070b0ba9720SXenia Ragiadakou readl(&xhci->op_regs->status));
10715535b1d5SAndiry Xu
107265b22f93SSarah Sharp /* USB core calls the PCI reinit and start functions twice:
107365b22f93SSarah Sharp * first with the primary HCD, and then with the secondary HCD.
107465b22f93SSarah Sharp * If we don't do the same, the host will never be started.
107565b22f93SSarah Sharp */
107665b22f93SSarah Sharp xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1077802dcafcSMathias Nyman retval = xhci_init(hcd);
10785535b1d5SAndiry Xu if (retval)
10795535b1d5SAndiry Xu return retval;
108077df9e0bSTony Camuso comp_timer_running = true;
108177df9e0bSTony Camuso
108265b22f93SSarah Sharp xhci_dbg(xhci, "Start the primary HCD\n");
1083802dcafcSMathias Nyman retval = xhci_run(hcd);
1084802dcafcSMathias Nyman if (!retval && xhci->shared_hcd) {
108565b22f93SSarah Sharp xhci_dbg(xhci, "Start the secondary HCD\n");
1086802dcafcSMathias Nyman retval = xhci_run(xhci->shared_hcd);
1087b3209379SSarah Sharp }
10889339641bSMathias Nyman if (retval)
10899339641bSMathias Nyman return retval;
10909339641bSMathias Nyman /*
10919339641bSMathias Nyman * Resume roothubs unconditionally as PORTSC change bits are not
10929339641bSMathias Nyman * immediately visible after xHC reset
10939339641bSMathias Nyman */
10945535b1d5SAndiry Xu hcd->state = HC_STATE_SUSPENDED;
10959339641bSMathias Nyman
10969339641bSMathias Nyman if (xhci->shared_hcd) {
1097b3209379SSarah Sharp xhci->shared_hcd->state = HC_STATE_SUSPENDED;
10989339641bSMathias Nyman usb_hcd_resume_root_hub(xhci->shared_hcd);
10999339641bSMathias Nyman }
11009339641bSMathias Nyman usb_hcd_resume_root_hub(hcd);
11019339641bSMathias Nyman
1102f69e3120SAlan Stern goto done;
11035535b1d5SAndiry Xu }
11045535b1d5SAndiry Xu
11055535b1d5SAndiry Xu /* step 4: set Run/Stop bit */
1106b0ba9720SXenia Ragiadakou command = readl(&xhci->op_regs->command);
11075535b1d5SAndiry Xu command |= CMD_RUN;
1108204b7793SXenia Ragiadakou writel(command, &xhci->op_regs->command);
1109dc0b177cSLin Wang xhci_handshake(&xhci->op_regs->status, STS_HALT,
11105535b1d5SAndiry Xu 0, 250 * 1000);
11115535b1d5SAndiry Xu
11125535b1d5SAndiry Xu /* step 5: walk topology and initialize portsc,
11135535b1d5SAndiry Xu * portpmsc and portli
11145535b1d5SAndiry Xu */
11155535b1d5SAndiry Xu /* this is done in bus_resume */
11165535b1d5SAndiry Xu
11175535b1d5SAndiry Xu /* step 6: restart each of the previously
11185535b1d5SAndiry Xu * running endpoints by ringing their doorbells
11195535b1d5SAndiry Xu */
11205535b1d5SAndiry Xu
11215535b1d5SAndiry Xu spin_unlock_irq(&xhci->lock);
1122f69e3120SAlan Stern
1123dfba2174SLu Baolu xhci_dbc_resume(xhci);
1124dfba2174SLu Baolu
1125f69e3120SAlan Stern if (retval == 0) {
1126253f588cSMathias Nyman /*
1127253f588cSMathias Nyman * Resume roothubs only if there are pending events.
1128253f588cSMathias Nyman * USB 3 devices resend U3 LFPS wake after a 100ms delay if
1129f83810e0SWesley Cheng * the first wake signalling failed; give it that chance if
1130f83810e0SWesley Cheng * there are suspended USB 3 devices.
1131253f588cSMathias Nyman */
1132f83810e0SWesley Cheng if (xhci->usb3_rhub.bus_state.suspended_ports ||
1133f83810e0SWesley Cheng xhci->usb3_rhub.bus_state.bus_suspended)
1134f83810e0SWesley Cheng suspended_usb3_devs = true;
1135f83810e0SWesley Cheng
1136253f588cSMathias Nyman pending_portevent = xhci_pending_portevent(xhci);
1137f83810e0SWesley Cheng
1138f83810e0SWesley Cheng if (suspended_usb3_devs && !pending_portevent &&
1139f83810e0SWesley Cheng msg.event == PM_EVENT_AUTO_RESUME) {
1140253f588cSMathias Nyman msleep(120);
1141253f588cSMathias Nyman pending_portevent = xhci_pending_portevent(xhci);
1142253f588cSMathias Nyman }
1143253f588cSMathias Nyman
1144253f588cSMathias Nyman if (pending_portevent) {
1145873f3236SHeiner Kallweit if (xhci->shared_hcd)
1146f69e3120SAlan Stern usb_hcd_resume_root_hub(xhci->shared_hcd);
1147671ffdffSMathias Nyman usb_hcd_resume_root_hub(hcd);
1148f69e3120SAlan Stern }
1149d6236f6dSWang, Yu }
11509339641bSMathias Nyman done:
115171c731a2SAlexis R. Cortes /*
115271c731a2SAlexis R. Cortes * If the system is subject to the quirk, the Compliance Mode Timer
115371c731a2SAlexis R. Cortes * always needs to be re-initialized after a system resume, as ports
115471c731a2SAlexis R. Cortes * may suffer the Compliance Mode issue again. It doesn't matter
115571c731a2SAlexis R. Cortes * whether ports entered U0 before the system was suspended.
115671c731a2SAlexis R. Cortes */
115777df9e0bSTony Camuso if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
115871c731a2SAlexis R. Cortes compliance_mode_recovery_timer_init(xhci);
115971c731a2SAlexis R. Cortes
11609da5a109SJiahau Chang if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
11619da5a109SJiahau Chang usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
11629da5a109SJiahau Chang
1163c52804a4SSarah Sharp /* Re-enable port polling. */
1164669bc5a1SMathias Nyman xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
1165669bc5a1SMathias Nyman __func__, hcd->self.busnum);
1166873f3236SHeiner Kallweit if (xhci->shared_hcd) {
116714e61a1bSAl Cooper set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
116814e61a1bSAl Cooper usb_hcd_poll_rh_status(xhci->shared_hcd);
1169873f3236SHeiner Kallweit }
1170671ffdffSMathias Nyman set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1171671ffdffSMathias Nyman usb_hcd_poll_rh_status(hcd);
1172c52804a4SSarah Sharp
1173f69e3120SAlan Stern return retval;
11745535b1d5SAndiry Xu }
1175436e8c7dSAndrew Bresticker EXPORT_SYMBOL_GPL(xhci_resume);
1176b5b5c3acSSarah Sharp #endif /* CONFIG_PM */
1177b5b5c3acSSarah Sharp
1178bc75fa38SAlex Chiang /*-------------------------------------------------------------------------*/
1179bc75fa38SAlex Chiang
11802017a1e5STejas Joglekar static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
11812017a1e5STejas Joglekar {
11822017a1e5STejas Joglekar void *temp;
11832017a1e5STejas Joglekar int ret = 0;
11842017a1e5STejas Joglekar unsigned int buf_len;
11852017a1e5STejas Joglekar enum dma_data_direction dir;
11862017a1e5STejas Joglekar
11872017a1e5STejas Joglekar dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
11882017a1e5STejas Joglekar buf_len = urb->transfer_buffer_length;
11892017a1e5STejas Joglekar
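/*
 * Bounce buffer: linearize the URB's scatter-gather list into one
 * contiguous allocation so the transfer can be mapped as a single DMA
 * region, for hosts with a limited TRB cache (see
 * XHCI_SG_TRB_CACHE_SIZE_QUIRK in xhci_map_urb_for_dma() below).
 */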
11902017a1e5STejas Joglekar temp = kzalloc_node(buf_len, GFP_ATOMIC,
11912017a1e5STejas Joglekar dev_to_node(hcd->self.sysdev));
1192620b6cf2SPrashanth K if (!temp)
1193620b6cf2SPrashanth K return -ENOMEM;
11942017a1e5STejas Joglekar
11952017a1e5STejas Joglekar if (usb_urb_dir_out(urb))
11962017a1e5STejas Joglekar sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
11972017a1e5STejas Joglekar temp, buf_len, 0);
11982017a1e5STejas Joglekar
11992017a1e5STejas Joglekar urb->transfer_buffer = temp;
12002017a1e5STejas Joglekar urb->transfer_dma = dma_map_single(hcd->self.sysdev,
12012017a1e5STejas Joglekar urb->transfer_buffer,
12022017a1e5STejas Joglekar urb->transfer_buffer_length,
12032017a1e5STejas Joglekar dir);
12042017a1e5STejas Joglekar
12052017a1e5STejas Joglekar if (dma_mapping_error(hcd->self.sysdev,
12062017a1e5STejas Joglekar urb->transfer_dma)) {
12072017a1e5STejas Joglekar ret = -EAGAIN;
12082017a1e5STejas Joglekar kfree(temp);
12092017a1e5STejas Joglekar } else {
12102017a1e5STejas Joglekar urb->transfer_flags |= URB_DMA_MAP_SINGLE;
12112017a1e5STejas Joglekar }
12122017a1e5STejas Joglekar
12132017a1e5STejas Joglekar return ret;
12142017a1e5STejas Joglekar }
12152017a1e5STejas Joglekar
12162017a1e5STejas Joglekar static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
12172017a1e5STejas Joglekar struct urb *urb)
12182017a1e5STejas Joglekar {
12192017a1e5STejas Joglekar bool ret = false;
12202017a1e5STejas Joglekar unsigned int i;
12212017a1e5STejas Joglekar unsigned int len = 0;
12222017a1e5STejas Joglekar unsigned int trb_size;
12232017a1e5STejas Joglekar unsigned int max_pkt;
12242017a1e5STejas Joglekar struct scatterlist *sg;
12252017a1e5STejas Joglekar struct scatterlist *tail_sg;
12262017a1e5STejas Joglekar
12272017a1e5STejas Joglekar tail_sg = urb->sg;
12282017a1e5STejas Joglekar max_pkt = usb_endpoint_maxp(&urb->ep->desc);
12292017a1e5STejas Joglekar
12302017a1e5STejas Joglekar if (!urb->num_sgs)
12312017a1e5STejas Joglekar return ret;
12322017a1e5STejas Joglekar
12332017a1e5STejas Joglekar if (urb->dev->speed >= USB_SPEED_SUPER)
12342017a1e5STejas Joglekar trb_size = TRB_CACHE_SIZE_SS;
12352017a1e5STejas Joglekar else
12362017a1e5STejas Joglekar trb_size = TRB_CACHE_SIZE_HS;
12372017a1e5STejas Joglekar
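/*
 * Roughly: once more SG entries would be queued than the host can cache
 * as TRBs, check that the data described from the current tail entry
 * onward still adds up to at least one max packet; if not, the host may
 * mishandle the TD, so fall back to a temporary bounce buffer.
 */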
12382017a1e5STejas Joglekar if (urb->transfer_buffer_length != 0 &&
12392017a1e5STejas Joglekar !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
12402017a1e5STejas Joglekar for_each_sg(urb->sg, sg, urb->num_sgs, i) {
12412017a1e5STejas Joglekar len = len + sg->length;
12422017a1e5STejas Joglekar if (i > trb_size - 2) {
12432017a1e5STejas Joglekar len = len - tail_sg->length;
12442017a1e5STejas Joglekar if (len < max_pkt) {
12452017a1e5STejas Joglekar ret = true;
12462017a1e5STejas Joglekar break;
12472017a1e5STejas Joglekar }
12482017a1e5STejas Joglekar
12492017a1e5STejas Joglekar tail_sg = sg_next(tail_sg);
12502017a1e5STejas Joglekar }
12512017a1e5STejas Joglekar }
12522017a1e5STejas Joglekar }
12532017a1e5STejas Joglekar return ret;
12542017a1e5STejas Joglekar }
12552017a1e5STejas Joglekar
12562017a1e5STejas Joglekar static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
12572017a1e5STejas Joglekar {
12582017a1e5STejas Joglekar unsigned int len;
12592017a1e5STejas Joglekar unsigned int buf_len;
12602017a1e5STejas Joglekar enum dma_data_direction dir;
12612017a1e5STejas Joglekar
12622017a1e5STejas Joglekar dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
12632017a1e5STejas Joglekar
12642017a1e5STejas Joglekar buf_len = urb->transfer_buffer_length;
12652017a1e5STejas Joglekar
12662017a1e5STejas Joglekar if (IS_ENABLED(CONFIG_HAS_DMA) &&
12672017a1e5STejas Joglekar (urb->transfer_flags & URB_DMA_MAP_SINGLE))
12682017a1e5STejas Joglekar dma_unmap_single(hcd->self.sysdev,
12692017a1e5STejas Joglekar urb->transfer_dma,
12702017a1e5STejas Joglekar urb->transfer_buffer_length,
12712017a1e5STejas Joglekar dir);
12722017a1e5STejas Joglekar
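/* For an IN transfer, copy the received data from the bounce buffer back into the URB's SG list */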
1273271a21d8SMathias Nyman if (usb_urb_dir_in(urb)) {
12742017a1e5STejas Joglekar len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
12752017a1e5STejas Joglekar urb->transfer_buffer,
12762017a1e5STejas Joglekar buf_len,
12772017a1e5STejas Joglekar 0);
1278271a21d8SMathias Nyman if (len != buf_len) {
1279271a21d8SMathias Nyman xhci_dbg(hcd_to_xhci(hcd),
1280271a21d8SMathias Nyman "Copy from tmp buf to urb sg list failed\n");
1281271a21d8SMathias Nyman urb->actual_length = len;
1282271a21d8SMathias Nyman }
1283271a21d8SMathias Nyman }
12842017a1e5STejas Joglekar urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
12852017a1e5STejas Joglekar kfree(urb->transfer_buffer);
12862017a1e5STejas Joglekar urb->transfer_buffer = NULL;
12872017a1e5STejas Joglekar }
12882017a1e5STejas Joglekar
128933e39350SNicolas Saenz Julienne /*
129033e39350SNicolas Saenz Julienne * Bypass the DMA mapping if the URB is suitable for Immediate Data Transfer
129133e39350SNicolas Saenz Julienne * (IDT); we'll copy the actual data into the TRB address register instead.
129233e39350SNicolas Saenz Julienne * This is limited to transfers of up to 8 bytes on output endpoints of any
129333e39350SNicolas Saenz Julienne * kind with wMaxPacketSize >= 8 bytes. If suitable for IDT, only one Transfer TRB per TD is allowed.
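 * e.g. an 8-byte control OUT data stage can be placed directly in the TRB.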
129433e39350SNicolas Saenz Julienne */
129533e39350SNicolas Saenz Julienne static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
129633e39350SNicolas Saenz Julienne gfp_t mem_flags)
129733e39350SNicolas Saenz Julienne {
12982017a1e5STejas Joglekar struct xhci_hcd *xhci;
12992017a1e5STejas Joglekar
13002017a1e5STejas Joglekar xhci = hcd_to_xhci(hcd);
13012017a1e5STejas Joglekar
130233e39350SNicolas Saenz Julienne if (xhci_urb_suitable_for_idt(urb))
130333e39350SNicolas Saenz Julienne return 0;
130433e39350SNicolas Saenz Julienne
13052017a1e5STejas Joglekar if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
13062017a1e5STejas Joglekar if (xhci_urb_temp_buffer_required(hcd, urb))
13072017a1e5STejas Joglekar return xhci_map_temp_buffer(hcd, urb);
13082017a1e5STejas Joglekar }
130933e39350SNicolas Saenz Julienne return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
131033e39350SNicolas Saenz Julienne }
131133e39350SNicolas Saenz Julienne
13122017a1e5STejas Joglekar static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
13132017a1e5STejas Joglekar {
13142017a1e5STejas Joglekar struct xhci_hcd *xhci;
13152017a1e5STejas Joglekar bool unmap_temp_buf = false;
13162017a1e5STejas Joglekar
13172017a1e5STejas Joglekar xhci = hcd_to_xhci(hcd);
13182017a1e5STejas Joglekar
13192017a1e5STejas Joglekar if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
13202017a1e5STejas Joglekar unmap_temp_buf = true;
13212017a1e5STejas Joglekar
13222017a1e5STejas Joglekar if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
13232017a1e5STejas Joglekar xhci_unmap_temp_buf(hcd, urb);
13242017a1e5STejas Joglekar else
13252017a1e5STejas Joglekar usb_hcd_unmap_urb_for_dma(hcd, urb);
13262017a1e5STejas Joglekar }
13272017a1e5STejas Joglekar
13282017a1e5STejas Joglekar /**
1329bc75fa38SAlex Chiang * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
1330bc75fa38SAlex Chiang * HCDs. Find the index for an endpoint given its descriptor. Use the return
1331bc75fa38SAlex Chiang * value to left-shift 1 to form the bitmask.
1332bc75fa38SAlex Chiang *
1333bc75fa38SAlex Chiang * Index = (epnum * 2) + direction - 1,
1334bc75fa38SAlex Chiang * where direction = 0 for OUT, 1 for IN.
1335bc75fa38SAlex Chiang * For control endpoints, the IN index is used (OUT index is unused), so
1336bc75fa38SAlex Chiang * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
1337bc75fa38SAlex Chiang */
1338bc75fa38SAlex Chiang unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1339bc75fa38SAlex Chiang {
1340bc75fa38SAlex Chiang unsigned int index;
1341bc75fa38SAlex Chiang if (usb_endpoint_xfer_control(desc))
1342bc75fa38SAlex Chiang index = (unsigned int) (usb_endpoint_num(desc)*2);
1343bc75fa38SAlex Chiang else
1344bc75fa38SAlex Chiang index = (unsigned int) (usb_endpoint_num(desc)*2) +
1345bc75fa38SAlex Chiang (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1346bc75fa38SAlex Chiang return index;
1347bc75fa38SAlex Chiang }
134814295a15SChunfeng Yun EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
1349bc75fa38SAlex Chiang
135001c5f447SJulius Werner /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
135101c5f447SJulius Werner * address from the XHCI endpoint index.
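 * e.g. ep_index 1 -> 0x01 (EP1 OUT), ep_index 2 -> 0x81 (EP1 IN).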
135201c5f447SJulius Werner */
1353d017aeafSGreg Kroah-Hartman static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
135401c5f447SJulius Werner {
135501c5f447SJulius Werner unsigned int number = DIV_ROUND_UP(ep_index, 2);
135601c5f447SJulius Werner unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
135701c5f447SJulius Werner return direction | number;
135801c5f447SJulius Werner }
135901c5f447SJulius Werner
1360bc75fa38SAlex Chiang /* Find the flag for this endpoint (for use in the control context). Use the
1361bc75fa38SAlex Chiang * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1362bc75fa38SAlex Chiang * bit 1, etc.
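 * e.g. EP1 OUT has endpoint index 1, so its flag is 1 << 2 = 0x4.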
1363bc75fa38SAlex Chiang */
13643969384cSLu Baolu static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1365bc75fa38SAlex Chiang {
1366bc75fa38SAlex Chiang return 1 << (xhci_get_endpoint_index(desc) + 1);
1367bc75fa38SAlex Chiang }
1368bc75fa38SAlex Chiang
1369bc75fa38SAlex Chiang /* Compute the last valid endpoint context index. Basically, this is the
1370bc75fa38SAlex Chiang * endpoint index plus one. For slot contexts with more than one valid endpoint,
1371bc75fa38SAlex Chiang * we find the most significant bit set in the added contexts flags.
1372bc75fa38SAlex Chiang * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
1373bc75fa38SAlex Chiang * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
1374bc75fa38SAlex Chiang */
1375bc75fa38SAlex Chiang unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1376bc75fa38SAlex Chiang {
1377bc75fa38SAlex Chiang return fls(added_ctxs) - 1;
1378bc75fa38SAlex Chiang }
1379bc75fa38SAlex Chiang
1380bc75fa38SAlex Chiang /* Returns 1 if the arguments are OK;
1381bc75fa38SAlex Chiang * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
1382bc75fa38SAlex Chiang */
13838212a49dSDmitry Torokhov static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
138464927730SAndiry Xu struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
138564927730SAndiry Xu const char *func) {
138664927730SAndiry Xu struct xhci_hcd *xhci;
138764927730SAndiry Xu struct xhci_virt_device *virt_dev;
138864927730SAndiry Xu
1389bc75fa38SAlex Chiang if (!hcd || (check_ep && !ep) || !udev) {
13905c1127d3SXenia Ragiadakou pr_debug("xHCI %s called with invalid args\n", func);
1391bc75fa38SAlex Chiang return -EINVAL;
1392bc75fa38SAlex Chiang }
1393bc75fa38SAlex Chiang if (!udev->parent) {
13945c1127d3SXenia Ragiadakou pr_debug("xHCI %s called for root hub\n", func);
1395bc75fa38SAlex Chiang return 0;
1396bc75fa38SAlex Chiang }
139764927730SAndiry Xu
139864927730SAndiry Xu xhci = hcd_to_xhci(hcd);
13997bd89b40SSarah Sharp if (check_virt_dev) {
140073ddc247Ssifram.rajas@gmail.com if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
14015c1127d3SXenia Ragiadakou xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
14025c1127d3SXenia Ragiadakou func);
1403bc75fa38SAlex Chiang return -EINVAL;
1404bc75fa38SAlex Chiang }
140564927730SAndiry Xu
140664927730SAndiry Xu virt_dev = xhci->devs[udev->slot_id];
140764927730SAndiry Xu if (virt_dev->udev != udev) {
14085c1127d3SXenia Ragiadakou xhci_dbg(xhci, "xHCI %s called with udev and "
140964927730SAndiry Xu "virt_dev does not match\n", func);
141064927730SAndiry Xu return -EINVAL;
141164927730SAndiry Xu }
141264927730SAndiry Xu }
141364927730SAndiry Xu
1414203a8661SSarah Sharp if (xhci->xhc_state & XHCI_STATE_HALTED)
1415203a8661SSarah Sharp return -ENODEV;
1416203a8661SSarah Sharp
1417bc75fa38SAlex Chiang return 1;
1418bc75fa38SAlex Chiang }
1419bc75fa38SAlex Chiang
1420bc75fa38SAlex Chiang static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1421bc75fa38SAlex Chiang struct usb_device *udev, struct xhci_command *command,
1422bc75fa38SAlex Chiang bool ctx_change, bool must_succeed);
1423bc75fa38SAlex Chiang
1424bc75fa38SAlex Chiang /*
1425bc75fa38SAlex Chiang * Full speed devices may have a max packet size greater than 8 bytes, but the
1426bc75fa38SAlex Chiang * USB core doesn't know that until it reads the first 8 bytes of the
1427bc75fa38SAlex Chiang * descriptor. If the usb_device's max packet size changes after that point,
1428bc75fa38SAlex Chiang * we need to issue an evaluate context command and wait on it.
1429bc75fa38SAlex Chiang */
1430bc75fa38SAlex Chiang static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1431dda32c00SChristophe JAILLET unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
1432bc75fa38SAlex Chiang {
1433bc75fa38SAlex Chiang struct xhci_container_ctx *out_ctx;
1434bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
1435bc75fa38SAlex Chiang struct xhci_ep_ctx *ep_ctx;
1436ddba5cd0SMathias Nyman struct xhci_command *command;
1437bc75fa38SAlex Chiang int max_packet_size;
1438bc75fa38SAlex Chiang int hw_max_packet_size;
1439bc75fa38SAlex Chiang int ret = 0;
1440bc75fa38SAlex Chiang
1441bc75fa38SAlex Chiang out_ctx = xhci->devs[slot_id]->out_ctx;
1442bc75fa38SAlex Chiang ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
144328ccd296SMatt Evans hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
144429cc8897SKuninori Morimoto max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1445bc75fa38SAlex Chiang if (hw_max_packet_size != max_packet_size) {
14463a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
14473a7fa5beSXenia Ragiadakou "Max Packet Size for ep 0 changed.");
14483a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
14493a7fa5beSXenia Ragiadakou "Max packet size in usb_device = %d",
1450bc75fa38SAlex Chiang max_packet_size);
14513a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
14523a7fa5beSXenia Ragiadakou "Max packet size in xHCI HW = %d",
1453bc75fa38SAlex Chiang hw_max_packet_size);
14543a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
14553a7fa5beSXenia Ragiadakou "Issuing evaluate context command.");
1456bc75fa38SAlex Chiang
1457bc75fa38SAlex Chiang /* Set up the input context flags for the command */
1458bc75fa38SAlex Chiang /* FIXME: This won't work if a non-default control endpoint
1459bc75fa38SAlex Chiang * changes max packet sizes.
1460bc75fa38SAlex Chiang */
1461ddba5cd0SMathias Nyman
1462dda32c00SChristophe JAILLET command = xhci_alloc_command(xhci, true, mem_flags);
1463ddba5cd0SMathias Nyman if (!command)
1464ddba5cd0SMathias Nyman return -ENOMEM;
1465ddba5cd0SMathias Nyman
1466ddba5cd0SMathias Nyman command->in_ctx = xhci->devs[slot_id]->in_ctx;
14674daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
146892f8e767SSarah Sharp if (!ctrl_ctx) {
146992f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
147092f8e767SSarah Sharp __func__);
1471ddba5cd0SMathias Nyman ret = -ENOMEM;
1472ddba5cd0SMathias Nyman goto command_cleanup;
147392f8e767SSarah Sharp }
147492f8e767SSarah Sharp /* Set up the modified control endpoint 0 */
147592f8e767SSarah Sharp xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
147692f8e767SSarah Sharp xhci->devs[slot_id]->out_ctx, ep_index);
147792f8e767SSarah Sharp
1478ddba5cd0SMathias Nyman ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1479a73d9d9cSAl Cooper ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
148092f8e767SSarah Sharp ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
148192f8e767SSarah Sharp ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
148292f8e767SSarah Sharp
148328ccd296SMatt Evans ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1484bc75fa38SAlex Chiang ctrl_ctx->drop_flags = 0;
1485bc75fa38SAlex Chiang
1486ddba5cd0SMathias Nyman ret = xhci_configure_endpoint(xhci, urb->dev, command,
1487bc75fa38SAlex Chiang true, false);
1488bc75fa38SAlex Chiang
1489bc75fa38SAlex Chiang /* Clean up the input context for later use by bandwidth
1490bc75fa38SAlex Chiang * functions.
1491bc75fa38SAlex Chiang */
149228ccd296SMatt Evans ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1493ddba5cd0SMathias Nyman command_cleanup:
1494ddba5cd0SMathias Nyman kfree(command->completion);
1495ddba5cd0SMathias Nyman kfree(command);
1496bc75fa38SAlex Chiang }
1497bc75fa38SAlex Chiang return ret;
1498bc75fa38SAlex Chiang }
1499bc75fa38SAlex Chiang
1500bc75fa38SAlex Chiang /*
1501bc75fa38SAlex Chiang * non-error returns are a promise to giveback() the urb later
1502bc75fa38SAlex Chiang * we drop ownership so next owner (or urb unlink) can get it
1503bc75fa38SAlex Chiang */
15043969384cSLu Baolu static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1505bc75fa38SAlex Chiang {
1506bc75fa38SAlex Chiang struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1507bc75fa38SAlex Chiang unsigned long flags;
1508bc75fa38SAlex Chiang int ret = 0;
150915febf5eSMathias Nyman unsigned int slot_id, ep_index;
151015febf5eSMathias Nyman unsigned int *ep_state;
15118e51adccSAndiry Xu struct urb_priv *urb_priv;
15127e64b037SMathias Nyman int num_tds;
1513bc75fa38SAlex Chiang
1514243a1dd7SHongyu Xie if (!urb)
1515bc75fa38SAlex Chiang return -EINVAL;
1516243a1dd7SHongyu Xie ret = xhci_check_args(hcd, urb->dev, urb->ep,
1517243a1dd7SHongyu Xie true, true, __func__);
1518243a1dd7SHongyu Xie if (ret <= 0)
1519243a1dd7SHongyu Xie return ret ? ret : -EINVAL;
1520bc75fa38SAlex Chiang
1521bc75fa38SAlex Chiang slot_id = urb->dev->slot_id;
1522bc75fa38SAlex Chiang ep_index = xhci_get_endpoint_index(&urb->ep->desc);
152315febf5eSMathias Nyman ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1524bc75fa38SAlex Chiang
152596eea587SAhmed S. Darwish if (!HCD_HW_ACCESSIBLE(hcd))
15266969408dSMathias Nyman return -ESHUTDOWN;
152796eea587SAhmed S. Darwish
1528b8c3b718SMathias Nyman if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1529b8c3b718SMathias Nyman xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1530b8c3b718SMathias Nyman return -ENODEV;
1531b8c3b718SMathias Nyman }
15328e51adccSAndiry Xu
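/*
 * Work out how many TDs the URB needs: one per packet for isochronous;
 * a bulk OUT URB that is an exact multiple of the max packet size with
 * URB_ZERO_PACKET set needs an extra TD for the trailing zero-length
 * packet; everything else takes a single TD.
 */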
15338e51adccSAndiry Xu if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1534e6f7caa3SMathias Nyman num_tds = urb->number_of_packets;
15354758dcd1SReyad Attiyat else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
15364758dcd1SReyad Attiyat urb->transfer_buffer_length > 0 &&
15374758dcd1SReyad Attiyat urb->transfer_flags & URB_ZERO_PACKET &&
15384758dcd1SReyad Attiyat !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1539e6f7caa3SMathias Nyman num_tds = 2;
15408e51adccSAndiry Xu else
1541e6f7caa3SMathias Nyman num_tds = 1;
15428e51adccSAndiry Xu
1543da79ff6eSGustavo A. R. Silva urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
15448e51adccSAndiry Xu if (!urb_priv)
15458e51adccSAndiry Xu return -ENOMEM;
15468e51adccSAndiry Xu
15479ef7fbbbSMathias Nyman urb_priv->num_tds = num_tds;
15489ef7fbbbSMathias Nyman urb_priv->num_tds_done = 0;
15498e51adccSAndiry Xu urb->hcpriv = urb_priv;
15508e51adccSAndiry Xu
15515abdc2e6SFelipe Balbi trace_xhci_urb_enqueue(urb);
15525abdc2e6SFelipe Balbi
1553bc75fa38SAlex Chiang if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1554bc75fa38SAlex Chiang /* Check to see if the max packet size for the default control
1555bc75fa38SAlex Chiang * endpoint changed during FS device enumeration
1556bc75fa38SAlex Chiang */
1557bc75fa38SAlex Chiang if (urb->dev->speed == USB_SPEED_FULL) {
1558bc75fa38SAlex Chiang ret = xhci_check_maxpacket(xhci, slot_id,
1559dda32c00SChristophe JAILLET ep_index, urb, mem_flags);
1560d13565c1SSarah Sharp if (ret < 0) {
15614daf9df5SLin Wang xhci_urb_free_priv(urb_priv);
1562d13565c1SSarah Sharp urb->hcpriv = NULL;
1563bc75fa38SAlex Chiang return ret;
1564bc75fa38SAlex Chiang }
1565d13565c1SSarah Sharp }
15666969408dSMathias Nyman }
1567bc75fa38SAlex Chiang
1568bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
15696969408dSMathias Nyman
15706969408dSMathias Nyman if (xhci->xhc_state & XHCI_STATE_DYING) {
15716969408dSMathias Nyman xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1572bc75fa38SAlex Chiang urb->ep->desc.bEndpointAddress, urb);
1573d13565c1SSarah Sharp ret = -ESHUTDOWN;
15746969408dSMathias Nyman goto free_priv;
15756969408dSMathias Nyman }
157615febf5eSMathias Nyman if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
157715febf5eSMathias Nyman xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
157815febf5eSMathias Nyman *ep_state);
157915febf5eSMathias Nyman ret = -EINVAL;
158015febf5eSMathias Nyman goto free_priv;
158115febf5eSMathias Nyman }
1582f5249461SMathias Nyman if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1583f5249461SMathias Nyman xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1584f5249461SMathias Nyman ret = -EINVAL;
1585f5249461SMathias Nyman goto free_priv;
1586f5249461SMathias Nyman }
15876969408dSMathias Nyman
15886969408dSMathias Nyman switch (usb_endpoint_type(&urb->ep->desc)) {
15896969408dSMathias Nyman
15906969408dSMathias Nyman case USB_ENDPOINT_XFER_CONTROL:
15916969408dSMathias Nyman ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
15926969408dSMathias Nyman slot_id, ep_index);
15936969408dSMathias Nyman break;
15946969408dSMathias Nyman case USB_ENDPOINT_XFER_BULK:
15956969408dSMathias Nyman ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
15966969408dSMathias Nyman slot_id, ep_index);
15976969408dSMathias Nyman break;
15986969408dSMathias Nyman case USB_ENDPOINT_XFER_INT:
15996969408dSMathias Nyman ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
16006969408dSMathias Nyman slot_id, ep_index);
16016969408dSMathias Nyman break;
16026969408dSMathias Nyman case USB_ENDPOINT_XFER_ISOC:
16036969408dSMathias Nyman ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
16046969408dSMathias Nyman slot_id, ep_index);
16056969408dSMathias Nyman }
16066969408dSMathias Nyman
16076969408dSMathias Nyman if (ret) {
1608d13565c1SSarah Sharp free_priv:
16094daf9df5SLin Wang xhci_urb_free_priv(urb_priv);
1610d13565c1SSarah Sharp urb->hcpriv = NULL;
16116969408dSMathias Nyman }
1612bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
1613d13565c1SSarah Sharp return ret;
1614bc75fa38SAlex Chiang }
1615bc75fa38SAlex Chiang
1616bc75fa38SAlex Chiang /*
1617bc75fa38SAlex Chiang * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
1618bc75fa38SAlex Chiang * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
1619bc75fa38SAlex Chiang * should pick up where it left off in the TD, unless a Set Transfer Ring
1620bc75fa38SAlex Chiang * Dequeue Pointer is issued.
1621bc75fa38SAlex Chiang *
1622bc75fa38SAlex Chiang * The TRBs that make up the buffers for the canceled URB will be "removed" from
1623bc75fa38SAlex Chiang * the ring. Since the ring is a contiguous structure, they can't be physically
1624bc75fa38SAlex Chiang * removed. Instead, there are a few cases to handle:
1625bc75fa38SAlex Chiang *
1626bc75fa38SAlex Chiang * 1) If the HC is in the middle of processing the URB to be canceled, we
1627bc75fa38SAlex Chiang * simply move the ring's dequeue pointer past those TRBs using the Set
1628bc75fa38SAlex Chiang * Transfer Ring Dequeue Pointer command. This will be the common case,
1629bc75fa38SAlex Chiang * when drivers timeout on the last submitted URB and attempt to cancel.
1630bc75fa38SAlex Chiang *
1631bc75fa38SAlex Chiang * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
1632bc75fa38SAlex Chiang * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
1633bc75fa38SAlex Chiang * HC will need to invalidate any TRBs it has cached after the stop
1634bc75fa38SAlex Chiang * endpoint command, as noted in the xHCI 0.95 errata.
1635bc75fa38SAlex Chiang *
1636bc75fa38SAlex Chiang * 3) The TD may have completed by the time the Stop Endpoint Command
1637bc75fa38SAlex Chiang * completes, so software needs to handle that case too.
1638bc75fa38SAlex Chiang *
1639bc75fa38SAlex Chiang * This function should protect against the TD enqueueing code ringing the
1640bc75fa38SAlex Chiang * doorbell while this code is waiting for a Stop Endpoint command to complete.
1641bc75fa38SAlex Chiang * It also needs to account for multiple cancellations happening at the same
1642bc75fa38SAlex Chiang * time for the same endpoint.
1643bc75fa38SAlex Chiang *
1644bc75fa38SAlex Chiang * Note that this function can be called in any context, or so says
1645bc75fa38SAlex Chiang * usb_hcd_unlink_urb()
1646bc75fa38SAlex Chiang */
16473969384cSLu Baolu static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1648bc75fa38SAlex Chiang {
1649bc75fa38SAlex Chiang unsigned long flags;
16508e51adccSAndiry Xu int ret, i;
1651bc75fa38SAlex Chiang u32 temp;
1652bc75fa38SAlex Chiang struct xhci_hcd *xhci;
16538e51adccSAndiry Xu struct urb_priv *urb_priv;
1654bc75fa38SAlex Chiang struct xhci_td *td;
1655bc75fa38SAlex Chiang unsigned int ep_index;
1656bc75fa38SAlex Chiang struct xhci_ring *ep_ring;
1657bc75fa38SAlex Chiang struct xhci_virt_ep *ep;
1658ddba5cd0SMathias Nyman struct xhci_command *command;
1659d3519b9dSMathias Nyman struct xhci_virt_device *vdev;
1660bc75fa38SAlex Chiang
1661bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
1662bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
16635abdc2e6SFelipe Balbi
16645abdc2e6SFelipe Balbi trace_xhci_urb_dequeue(urb);
16655abdc2e6SFelipe Balbi
1666bc75fa38SAlex Chiang /* Make sure the URB hasn't completed or been unlinked already */
1667bc75fa38SAlex Chiang ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1668d3519b9dSMathias Nyman if (ret)
1669bc75fa38SAlex Chiang goto done;
1670d3519b9dSMathias Nyman
1671d3519b9dSMathias Nyman /* give back URB now if we can't queue it for cancel */
1672d3519b9dSMathias Nyman vdev = xhci->devs[urb->dev->slot_id];
1673d3519b9dSMathias Nyman urb_priv = urb->hcpriv;
1674d3519b9dSMathias Nyman if (!vdev || !urb_priv)
1675d3519b9dSMathias Nyman goto err_giveback;
1676d3519b9dSMathias Nyman
1677d3519b9dSMathias Nyman ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1678d3519b9dSMathias Nyman ep = &vdev->eps[ep_index];
1679d3519b9dSMathias Nyman ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1680d3519b9dSMathias Nyman if (!ep || !ep_ring)
1681d3519b9dSMathias Nyman goto err_giveback;
1682d3519b9dSMathias Nyman
1683d9f11ba9SMathias Nyman /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
1684b0ba9720SXenia Ragiadakou temp = readl(&xhci->op_regs->status);
1685d9f11ba9SMathias Nyman if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1686d9f11ba9SMathias Nyman xhci_hc_died(xhci);
1687d9f11ba9SMathias Nyman goto done;
1688d9f11ba9SMathias Nyman }
1689d9f11ba9SMathias Nyman
16904937213bSMathias Nyman /*
16914937213bSMathias Nyman * Check that the ring was not re-allocated since the URB was enqueued. If
16924937213bSMathias Nyman * it was, make sure none of the ring-related pointers in this URB's private
16934937213bSMathias Nyman * data, such as td_list, are touched, otherwise we'd overwrite freed data.
16944937213bSMathias Nyman */
16954937213bSMathias Nyman if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
16964937213bSMathias Nyman xhci_err(xhci, "Canceled URB td not found on endpoint ring");
16974937213bSMathias Nyman for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
16984937213bSMathias Nyman td = &urb_priv->td[i];
16994937213bSMathias Nyman if (!list_empty(&td->cancelled_td_list))
17004937213bSMathias Nyman list_del_init(&td->cancelled_td_list);
17014937213bSMathias Nyman }
17024937213bSMathias Nyman goto err_giveback;
17034937213bSMathias Nyman }
17044937213bSMathias Nyman
1705d9f11ba9SMathias Nyman if (xhci->xhc_state & XHCI_STATE_HALTED) {
1706aa50b290SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1707d9f11ba9SMathias Nyman "HC halted, freeing TD manually.");
17089ef7fbbbSMathias Nyman for (i = urb_priv->num_tds_done;
1709d3519b9dSMathias Nyman i < urb_priv->num_tds;
17105c821711SMathias Nyman i++) {
17117e64b037SMathias Nyman td = &urb_priv->td[i];
1712585df1d9SSarah Sharp if (!list_empty(&td->td_list))
1713585df1d9SSarah Sharp list_del_init(&td->td_list);
1714585df1d9SSarah Sharp if (!list_empty(&td->cancelled_td_list))
1715585df1d9SSarah Sharp list_del_init(&td->cancelled_td_list);
1716585df1d9SSarah Sharp }
1717d3519b9dSMathias Nyman goto err_giveback;
1718bc75fa38SAlex Chiang }
1719bc75fa38SAlex Chiang
17209ef7fbbbSMathias Nyman i = urb_priv->num_tds_done;
17219ef7fbbbSMathias Nyman if (i < urb_priv->num_tds)
1722aa50b290SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1723aa50b290SXenia Ragiadakou "Cancel URB %p, dev %s, ep 0x%x, "
1724aa50b290SXenia Ragiadakou "starting at offset 0x%llx",
172579688acfSSarah Sharp urb, urb->dev->devpath,
172679688acfSSarah Sharp urb->ep->desc.bEndpointAddress,
172779688acfSSarah Sharp (unsigned long long) xhci_trb_virt_to_dma(
17287e64b037SMathias Nyman urb_priv->td[i].start_seg,
17297e64b037SMathias Nyman urb_priv->td[i].first_trb));
17308e51adccSAndiry Xu
17319ef7fbbbSMathias Nyman for (; i < urb_priv->num_tds; i++) {
17327e64b037SMathias Nyman td = &urb_priv->td[i];
1733674f8438SMathias Nyman /* TD can already be on cancelled list if ep halted on it */
1734674f8438SMathias Nyman if (list_empty(&td->cancelled_td_list)) {
1735674f8438SMathias Nyman td->cancel_status = TD_DIRTY;
1736674f8438SMathias Nyman list_add_tail(&td->cancelled_td_list,
1737674f8438SMathias Nyman &ep->cancelled_td_list);
1738674f8438SMathias Nyman }
17398e51adccSAndiry Xu }
17408e51adccSAndiry Xu
1741*8b2e38f2SMichal Pecio /* These completion handlers will sort out cancelled TDs for us */
1742*8b2e38f2SMichal Pecio if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
1743*8b2e38f2SMichal Pecio xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
1744*8b2e38f2SMichal Pecio urb->dev->slot_id, ep_index, ep->ep_state);
1745*8b2e38f2SMichal Pecio goto done;
1746*8b2e38f2SMichal Pecio }
1747*8b2e38f2SMichal Pecio
1748*8b2e38f2SMichal Pecio /* In this case no commands are pending but the endpoint is stopped */
1749*8b2e38f2SMichal Pecio if (ep->ep_state & EP_CLEARING_TT) {
1750*8b2e38f2SMichal Pecio /* and cancelled TDs can be given back right away */
1751*8b2e38f2SMichal Pecio xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
1752*8b2e38f2SMichal Pecio urb->dev->slot_id, ep_index, ep->ep_state);
1753*8b2e38f2SMichal Pecio xhci_process_cancelled_tds(ep);
1754*8b2e38f2SMichal Pecio } else {
1755*8b2e38f2SMichal Pecio /* Otherwise, queue a new Stop Endpoint command */
1756103afda0SMathias Nyman command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1757a0ee619fSHans de Goede if (!command) {
1758a0ee619fSHans de Goede ret = -ENOMEM;
1759a0ee619fSHans de Goede goto done;
1760a0ee619fSHans de Goede }
1761f1ece345SMichal Pecio ep->stop_time = jiffies;
17629983a5fcSMathias Nyman ep->ep_state |= EP_STOP_CMD_PENDING;
1763ddba5cd0SMathias Nyman xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1764ddba5cd0SMathias Nyman ep_index, 0);
1765bc75fa38SAlex Chiang xhci_ring_cmd_db(xhci);
1766bc75fa38SAlex Chiang }
1767bc75fa38SAlex Chiang done:
1768bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
1769bc75fa38SAlex Chiang return ret;
1770d3519b9dSMathias Nyman
1771d3519b9dSMathias Nyman err_giveback:
1772d3519b9dSMathias Nyman if (urb_priv)
1773d3519b9dSMathias Nyman xhci_urb_free_priv(urb_priv);
1774d3519b9dSMathias Nyman usb_hcd_unlink_urb_from_ep(hcd, urb);
1775d3519b9dSMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
1776d3519b9dSMathias Nyman usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1777d3519b9dSMathias Nyman return ret;
1778bc75fa38SAlex Chiang }
1779bc75fa38SAlex Chiang
1780bc75fa38SAlex Chiang /* Drop an endpoint from a new bandwidth configuration for this device.
1781bc75fa38SAlex Chiang * Only one call to this function is allowed per endpoint before
1782bc75fa38SAlex Chiang * check_bandwidth() or reset_bandwidth() must be called.
1783bc75fa38SAlex Chiang * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1784bc75fa38SAlex Chiang * add the endpoint to the schedule with possibly new parameters denoted by a
1785bc75fa38SAlex Chiang * different endpoint descriptor in usb_host_endpoint.
1786bc75fa38SAlex Chiang * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1787bc75fa38SAlex Chiang * not allowed.
1788bc75fa38SAlex Chiang *
1789bc75fa38SAlex Chiang * The USB core will not allow URBs to be queued to an endpoint that is being
1790bc75fa38SAlex Chiang * disabled, so there's no need for mutual exclusion to protect
1791bc75fa38SAlex Chiang * the xhci->devs[slot_id] structure.
1792bc75fa38SAlex Chiang */
179314295a15SChunfeng Yun int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1794bc75fa38SAlex Chiang struct usb_host_endpoint *ep)
1795bc75fa38SAlex Chiang {
1796bc75fa38SAlex Chiang struct xhci_hcd *xhci;
1797bc75fa38SAlex Chiang struct xhci_container_ctx *in_ctx, *out_ctx;
1798bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
1799bc75fa38SAlex Chiang unsigned int ep_index;
1800bc75fa38SAlex Chiang struct xhci_ep_ctx *ep_ctx;
1801bc75fa38SAlex Chiang u32 drop_flag;
1802d6759133SJulius Werner u32 new_add_flags, new_drop_flags;
1803bc75fa38SAlex Chiang int ret;
1804bc75fa38SAlex Chiang
180564927730SAndiry Xu ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1806bc75fa38SAlex Chiang if (ret <= 0)
1807bc75fa38SAlex Chiang return ret;
1808bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
1809fe6c6c13SSarah Sharp if (xhci->xhc_state & XHCI_STATE_DYING)
1810fe6c6c13SSarah Sharp return -ENODEV;
1811bc75fa38SAlex Chiang
1812fe6c6c13SSarah Sharp xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1813bc75fa38SAlex Chiang drop_flag = xhci_get_endpoint_flag(&ep->desc);
1814bc75fa38SAlex Chiang if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1815bc75fa38SAlex Chiang xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1816bc75fa38SAlex Chiang __func__, drop_flag);
1817bc75fa38SAlex Chiang return 0;
1818bc75fa38SAlex Chiang }
1819bc75fa38SAlex Chiang
1820bc75fa38SAlex Chiang in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1821bc75fa38SAlex Chiang out_ctx = xhci->devs[udev->slot_id]->out_ctx;
18224daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
182392f8e767SSarah Sharp if (!ctrl_ctx) {
182492f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
182592f8e767SSarah Sharp __func__);
182692f8e767SSarah Sharp return 0;
182792f8e767SSarah Sharp }
182892f8e767SSarah Sharp
1829bc75fa38SAlex Chiang ep_index = xhci_get_endpoint_index(&ep->desc);
1830bc75fa38SAlex Chiang ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1831bc75fa38SAlex Chiang /* If the HC already knows the endpoint is disabled,
1832bc75fa38SAlex Chiang * or the HCD has noted it is disabled, ignore this request
1833bc75fa38SAlex Chiang */
18345071e6b2SMathias Nyman if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
183528ccd296SMatt Evans le32_to_cpu(ctrl_ctx->drop_flags) &
183628ccd296SMatt Evans xhci_get_endpoint_flag(&ep->desc)) {
1837a6134136SHans de Goede /* Do not warn when called after a usb_device_reset */
1838a6134136SHans de Goede if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1839bc75fa38SAlex Chiang xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1840bc75fa38SAlex Chiang __func__, ep);
1841bc75fa38SAlex Chiang return 0;
1842bc75fa38SAlex Chiang }
1843bc75fa38SAlex Chiang
184428ccd296SMatt Evans ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
184528ccd296SMatt Evans new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1846bc75fa38SAlex Chiang
184728ccd296SMatt Evans ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
184828ccd296SMatt Evans new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1849bc75fa38SAlex Chiang
185002b6fdc2SLu Baolu xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
185102b6fdc2SLu Baolu
1852bc75fa38SAlex Chiang xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1853bc75fa38SAlex Chiang
1854d6759133SJulius Werner xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1855bc75fa38SAlex Chiang (unsigned int) ep->desc.bEndpointAddress,
1856bc75fa38SAlex Chiang udev->slot_id,
1857bc75fa38SAlex Chiang (unsigned int) new_drop_flags,
1858d6759133SJulius Werner (unsigned int) new_add_flags);
1859bc75fa38SAlex Chiang return 0;
1860bc75fa38SAlex Chiang }
186114295a15SChunfeng Yun EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1862bc75fa38SAlex Chiang
1863bc75fa38SAlex Chiang /* Add an endpoint to a new possible bandwidth configuration for this device.
1864bc75fa38SAlex Chiang * Only one call to this function is allowed per endpoint before
1865bc75fa38SAlex Chiang * check_bandwidth() or reset_bandwidth() must be called.
1866bc75fa38SAlex Chiang * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1867bc75fa38SAlex Chiang * add the endpoint to the schedule with possibly new parameters denoted by a
1868bc75fa38SAlex Chiang * different endpoint descriptor in usb_host_endpoint.
1869bc75fa38SAlex Chiang * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1870bc75fa38SAlex Chiang * not allowed.
1871bc75fa38SAlex Chiang *
1872bc75fa38SAlex Chiang * The USB core will not allow URBs to be queued to an endpoint until the
1873bc75fa38SAlex Chiang * configuration or alt setting is installed in the device, so there's no need
1874bc75fa38SAlex Chiang * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1875bc75fa38SAlex Chiang */
187614295a15SChunfeng Yun int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1877bc75fa38SAlex Chiang struct usb_host_endpoint *ep)
1878bc75fa38SAlex Chiang {
1879bc75fa38SAlex Chiang struct xhci_hcd *xhci;
188092c9691bSLin Wang struct xhci_container_ctx *in_ctx;
1881bc75fa38SAlex Chiang unsigned int ep_index;
1882bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
18835afa0a5eSMathias Nyman struct xhci_ep_ctx *ep_ctx;
1884bc75fa38SAlex Chiang u32 added_ctxs;
1885d6759133SJulius Werner u32 new_add_flags, new_drop_flags;
1886fa75ac37SSarah Sharp struct xhci_virt_device *virt_dev;
1887bc75fa38SAlex Chiang int ret = 0;
1888bc75fa38SAlex Chiang
188964927730SAndiry Xu ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1890bc75fa38SAlex Chiang if (ret <= 0) {
1891bc75fa38SAlex Chiang /* So we won't queue a reset ep command for a root hub */
1892bc75fa38SAlex Chiang ep->hcpriv = NULL;
1893bc75fa38SAlex Chiang return ret;
1894bc75fa38SAlex Chiang }
1895bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
1896fe6c6c13SSarah Sharp if (xhci->xhc_state & XHCI_STATE_DYING)
1897fe6c6c13SSarah Sharp return -ENODEV;
1898bc75fa38SAlex Chiang
1899bc75fa38SAlex Chiang added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1900bc75fa38SAlex Chiang if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1901bc75fa38SAlex Chiang /* FIXME when we have to issue an evaluate endpoint command to
1902bc75fa38SAlex Chiang * deal with ep0 max packet size changing once we get the
1903bc75fa38SAlex Chiang * descriptors
1904bc75fa38SAlex Chiang */
1905bc75fa38SAlex Chiang xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1906bc75fa38SAlex Chiang __func__, added_ctxs);
1907bc75fa38SAlex Chiang return 0;
1908bc75fa38SAlex Chiang }
1909bc75fa38SAlex Chiang
1910fa75ac37SSarah Sharp virt_dev = xhci->devs[udev->slot_id];
1911fa75ac37SSarah Sharp in_ctx = virt_dev->in_ctx;
19124daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
191392f8e767SSarah Sharp if (!ctrl_ctx) {
191492f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
191592f8e767SSarah Sharp __func__);
191692f8e767SSarah Sharp return 0;
191792f8e767SSarah Sharp }
1918fa75ac37SSarah Sharp
191992f8e767SSarah Sharp ep_index = xhci_get_endpoint_index(&ep->desc);
1920fa75ac37SSarah Sharp /* If this endpoint is already in use, and the upper layers are trying
1921fa75ac37SSarah Sharp * to add it again without dropping it, reject the addition.
1922fa75ac37SSarah Sharp */
1923fa75ac37SSarah Sharp if (virt_dev->eps[ep_index].ring &&
192492c9691bSLin Wang !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1925fa75ac37SSarah Sharp xhci_warn(xhci, "Trying to add endpoint 0x%x "
1926fa75ac37SSarah Sharp "without dropping it.\n",
1927fa75ac37SSarah Sharp (unsigned int) ep->desc.bEndpointAddress);
1928fa75ac37SSarah Sharp return -EINVAL;
1929fa75ac37SSarah Sharp }
1930fa75ac37SSarah Sharp
1931bc75fa38SAlex Chiang /* If the HCD has already noted the endpoint is enabled,
1932bc75fa38SAlex Chiang * ignore this request.
1933bc75fa38SAlex Chiang */
193492c9691bSLin Wang if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1935bc75fa38SAlex Chiang xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1936bc75fa38SAlex Chiang __func__, ep);
1937bc75fa38SAlex Chiang return 0;
1938bc75fa38SAlex Chiang }
1939bc75fa38SAlex Chiang
1940bc75fa38SAlex Chiang /*
1941bc75fa38SAlex Chiang * Configuration and alternate setting changes must be done in
1942bc75fa38SAlex Chiang * process context, not interrupt context (or so the documentation
1943bc75fa38SAlex Chiang * for usb_set_interface() and usb_set_configuration() claims).
1944bc75fa38SAlex Chiang */
1945fa75ac37SSarah Sharp if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1946bc75fa38SAlex Chiang dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1947bc75fa38SAlex Chiang __func__, ep->desc.bEndpointAddress);
1948bc75fa38SAlex Chiang return -ENOMEM;
1949bc75fa38SAlex Chiang }
1950bc75fa38SAlex Chiang
195128ccd296SMatt Evans ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
195228ccd296SMatt Evans new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1953bc75fa38SAlex Chiang
1954bc75fa38SAlex Chiang /* If xhci_endpoint_disable() was called for this endpoint, but the
1955bc75fa38SAlex Chiang * xHC hasn't been notified yet through the check_bandwidth() call,
1956bc75fa38SAlex Chiang * this re-adds a new state for the endpoint from the new endpoint
1957bc75fa38SAlex Chiang * descriptors. We must drop and re-add this endpoint, so we leave the
1958bc75fa38SAlex Chiang * drop flags alone.
1959bc75fa38SAlex Chiang */
196028ccd296SMatt Evans new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1961bc75fa38SAlex Chiang
1962bc75fa38SAlex Chiang /* Store the usb_device pointer for later use */
1963bc75fa38SAlex Chiang ep->hcpriv = udev;
1964bc75fa38SAlex Chiang
19655afa0a5eSMathias Nyman ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
19665afa0a5eSMathias Nyman trace_xhci_add_endpoint(ep_ctx);
19675afa0a5eSMathias Nyman
1968d6759133SJulius Werner xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1969bc75fa38SAlex Chiang (unsigned int) ep->desc.bEndpointAddress,
1970bc75fa38SAlex Chiang udev->slot_id,
1971bc75fa38SAlex Chiang (unsigned int) new_drop_flags,
1972d6759133SJulius Werner (unsigned int) new_add_flags);
1973bc75fa38SAlex Chiang return 0;
1974bc75fa38SAlex Chiang }
197514295a15SChunfeng Yun EXPORT_SYMBOL_GPL(xhci_add_endpoint);
1976bc75fa38SAlex Chiang
1977bc75fa38SAlex Chiang static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1978bc75fa38SAlex Chiang {
1979bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
1980bc75fa38SAlex Chiang struct xhci_ep_ctx *ep_ctx;
1981bc75fa38SAlex Chiang struct xhci_slot_ctx *slot_ctx;
1982bc75fa38SAlex Chiang int i;
1983bc75fa38SAlex Chiang
19844daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
198592f8e767SSarah Sharp if (!ctrl_ctx) {
198692f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
198792f8e767SSarah Sharp __func__);
198892f8e767SSarah Sharp return;
198992f8e767SSarah Sharp }
199092f8e767SSarah Sharp
1991bc75fa38SAlex Chiang /* When a device's add flag and drop flag are zero, any subsequent
1992bc75fa38SAlex Chiang * configure endpoint command will leave that endpoint's state
1993bc75fa38SAlex Chiang * untouched. Make sure we don't leave any old state in the input
1994bc75fa38SAlex Chiang * endpoint contexts.
1995bc75fa38SAlex Chiang */
1996bc75fa38SAlex Chiang ctrl_ctx->drop_flags = 0;
1997bc75fa38SAlex Chiang ctrl_ctx->add_flags = 0;
1998bc75fa38SAlex Chiang slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
199928ccd296SMatt Evans slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2000bc75fa38SAlex Chiang /* Endpoint 0 is always valid */
200128ccd296SMatt Evans slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
200298871e94SFelipe Balbi for (i = 1; i < 31; i++) {
2003bc75fa38SAlex Chiang ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2004bc75fa38SAlex Chiang ep_ctx->ep_info = 0;
2005bc75fa38SAlex Chiang ep_ctx->ep_info2 = 0;
2006bc75fa38SAlex Chiang ep_ctx->deq = 0;
2007bc75fa38SAlex Chiang ep_ctx->tx_info = 0;
2008bc75fa38SAlex Chiang }
2009bc75fa38SAlex Chiang }
2010bc75fa38SAlex Chiang
2011bc75fa38SAlex Chiang static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
201200161f7dSSarah Sharp struct usb_device *udev, u32 *cmd_status)
2013bc75fa38SAlex Chiang {
2014bc75fa38SAlex Chiang int ret;
2015bc75fa38SAlex Chiang
2016bc75fa38SAlex Chiang switch (*cmd_status) {
20170b7c105aSFelipe Balbi case COMP_COMMAND_ABORTED:
2018604d02a2SMathias Nyman case COMP_COMMAND_RING_STOPPED:
2019c311e391SMathias Nyman xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2020c311e391SMathias Nyman ret = -ETIME;
2021c311e391SMathias Nyman break;
20220b7c105aSFelipe Balbi case COMP_RESOURCE_ERROR:
2023288c0f44SOliver Neukum dev_warn(&udev->dev,
2024288c0f44SOliver Neukum "Not enough host controller resources for new device state.\n");
2025bc75fa38SAlex Chiang ret = -ENOMEM;
2026bc75fa38SAlex Chiang /* FIXME: can we allocate more resources for the HC? */
2027bc75fa38SAlex Chiang break;
20280b7c105aSFelipe Balbi case COMP_BANDWIDTH_ERROR:
20290b7c105aSFelipe Balbi case COMP_SECONDARY_BANDWIDTH_ERROR:
2030288c0f44SOliver Neukum dev_warn(&udev->dev,
2031288c0f44SOliver Neukum "Not enough bandwidth for new device state.\n");
2032bc75fa38SAlex Chiang ret = -ENOSPC;
2033bc75fa38SAlex Chiang /* FIXME: can we go back to the old state? */
2034bc75fa38SAlex Chiang break;
20350b7c105aSFelipe Balbi case COMP_TRB_ERROR:
2036bc75fa38SAlex Chiang /* the HCD set up something wrong */
2037bc75fa38SAlex Chiang dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2038bc75fa38SAlex Chiang "add flag = 1, "
2039bc75fa38SAlex Chiang "and endpoint is not disabled.\n");
2040bc75fa38SAlex Chiang ret = -EINVAL;
2041bc75fa38SAlex Chiang break;
20420b7c105aSFelipe Balbi case COMP_INCOMPATIBLE_DEVICE_ERROR:
2043288c0f44SOliver Neukum dev_warn(&udev->dev,
2044288c0f44SOliver Neukum "ERROR: Incompatible device for endpoint configure command.\n");
2045f6ba6fe2SAlex He ret = -ENODEV;
2046f6ba6fe2SAlex He break;
2047bc75fa38SAlex Chiang case COMP_SUCCESS:
20483a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
20493a7fa5beSXenia Ragiadakou "Successful Endpoint Configure command");
2050bc75fa38SAlex Chiang ret = 0;
2051bc75fa38SAlex Chiang break;
2052bc75fa38SAlex Chiang default:
2053288c0f44SOliver Neukum xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2054288c0f44SOliver Neukum *cmd_status);
2055bc75fa38SAlex Chiang ret = -EINVAL;
2056bc75fa38SAlex Chiang break;
2057bc75fa38SAlex Chiang }
2058bc75fa38SAlex Chiang return ret;
2059bc75fa38SAlex Chiang }
2060bc75fa38SAlex Chiang
2061bc75fa38SAlex Chiang static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
206200161f7dSSarah Sharp struct usb_device *udev, u32 *cmd_status)
2063bc75fa38SAlex Chiang {
2064bc75fa38SAlex Chiang int ret;
2065bc75fa38SAlex Chiang
2066bc75fa38SAlex Chiang switch (*cmd_status) {
20670b7c105aSFelipe Balbi case COMP_COMMAND_ABORTED:
2068604d02a2SMathias Nyman case COMP_COMMAND_RING_STOPPED:
2069c311e391SMathias Nyman xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2070c311e391SMathias Nyman ret = -ETIME;
2071c311e391SMathias Nyman break;
20720b7c105aSFelipe Balbi case COMP_PARAMETER_ERROR:
2073288c0f44SOliver Neukum dev_warn(&udev->dev,
2074288c0f44SOliver Neukum "WARN: xHCI driver set up an invalid evaluate context command.\n");
2075bc75fa38SAlex Chiang ret = -EINVAL;
2076bc75fa38SAlex Chiang break;
20770b7c105aSFelipe Balbi case COMP_SLOT_NOT_ENABLED_ERROR:
2078288c0f44SOliver Neukum dev_warn(&udev->dev,
2079288c0f44SOliver Neukum "WARN: slot not enabled for evaluate context command.\n");
2080b8031342SSarah Sharp ret = -EINVAL;
2081b8031342SSarah Sharp break;
20820b7c105aSFelipe Balbi case COMP_CONTEXT_STATE_ERROR:
2083288c0f44SOliver Neukum dev_warn(&udev->dev,
2084288c0f44SOliver Neukum "WARN: invalid context state for evaluate context command.\n");
2085bc75fa38SAlex Chiang ret = -EINVAL;
2086bc75fa38SAlex Chiang break;
20870b7c105aSFelipe Balbi case COMP_INCOMPATIBLE_DEVICE_ERROR:
2088288c0f44SOliver Neukum dev_warn(&udev->dev,
2089288c0f44SOliver Neukum "ERROR: Incompatible device for evaluate context command.\n");
2090f6ba6fe2SAlex He ret = -ENODEV;
2091f6ba6fe2SAlex He break;
20920b7c105aSFelipe Balbi case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
20931bb73a88SAlex He /* Max Exit Latency too large error */
20941bb73a88SAlex He dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
20951bb73a88SAlex He ret = -EINVAL;
20961bb73a88SAlex He break;
2097bc75fa38SAlex Chiang case COMP_SUCCESS:
20983a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
20993a7fa5beSXenia Ragiadakou "Successful evaluate context command");
2100bc75fa38SAlex Chiang ret = 0;
2101bc75fa38SAlex Chiang break;
2102bc75fa38SAlex Chiang default:
2103288c0f44SOliver Neukum xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2104288c0f44SOliver Neukum *cmd_status);
2105bc75fa38SAlex Chiang ret = -EINVAL;
2106bc75fa38SAlex Chiang break;
2107bc75fa38SAlex Chiang }
2108bc75fa38SAlex Chiang return ret;
2109bc75fa38SAlex Chiang }
2110bc75fa38SAlex Chiang
21112cf95c18SSarah Sharp static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
211292f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx)
21132cf95c18SSarah Sharp {
21142cf95c18SSarah Sharp u32 valid_add_flags;
21152cf95c18SSarah Sharp u32 valid_drop_flags;
21162cf95c18SSarah Sharp
21172cf95c18SSarah Sharp /* Ignore the slot flag (bit 0), and the default control endpoint flag
21182cf95c18SSarah Sharp * (bit 1). The default control endpoint is added during the Address
21192cf95c18SSarah Sharp * Device command and is never removed until the slot is disabled.
21202cf95c18SSarah Sharp */
2121ef73400cSXenia Ragiadakou valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2122ef73400cSXenia Ragiadakou valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
21232cf95c18SSarah Sharp
21242cf95c18SSarah Sharp /* Use hweight32 to count the number of ones in the add flags, or
21252cf95c18SSarah Sharp * number of endpoints added. Don't count endpoints that are changed
21262cf95c18SSarah Sharp * (both added and dropped).
21272cf95c18SSarah Sharp */
21282cf95c18SSarah Sharp return hweight32(valid_add_flags) -
21292cf95c18SSarah Sharp hweight32(valid_add_flags & valid_drop_flags);
21302cf95c18SSarah Sharp }
21312cf95c18SSarah Sharp
21322cf95c18SSarah Sharp static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
213392f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx)
21342cf95c18SSarah Sharp {
21352cf95c18SSarah Sharp u32 valid_add_flags;
21362cf95c18SSarah Sharp u32 valid_drop_flags;
21372cf95c18SSarah Sharp
213878d1ff02SXenia Ragiadakou valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
213978d1ff02SXenia Ragiadakou valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
21402cf95c18SSarah Sharp
21412cf95c18SSarah Sharp return hweight32(valid_drop_flags) -
21422cf95c18SSarah Sharp hweight32(valid_add_flags & valid_drop_flags);
21432cf95c18SSarah Sharp }
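/*
 * Worked example for the two counters above (values are illustrative):
 * suppose a config change adds endpoint contexts 2 and 3 and drops
 * contexts 3 and 5. After shifting out the slot and EP0 bits:
 *
 *	valid_add_flags  = 0b0011	(contexts 2 and 3)
 *	valid_drop_flags = 0b1010	(contexts 3 and 5)
 *	both             = 0b0010	(context 3 is "changed")
 *
 *	new endpoints     = hweight32(0b0011) - hweight32(0b0010) = 2 - 1 = 1
 *	dropped endpoints = hweight32(0b1010) - hweight32(0b0010) = 2 - 1 = 1
 *
 * Changed endpoints (added and dropped in the same command) cancel out,
 * so only the net change in host resources is accounted.
 */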
21442cf95c18SSarah Sharp
21452cf95c18SSarah Sharp /*
21462cf95c18SSarah Sharp * We need to reserve the new number of endpoints before the configure endpoint
21472cf95c18SSarah Sharp * command completes. We can't subtract the dropped endpoints from the number
21482cf95c18SSarah Sharp * of active endpoints until the command completes because we can oversubscribe
21492cf95c18SSarah Sharp * the host in this case:
21502cf95c18SSarah Sharp *
21512cf95c18SSarah Sharp * - the first configure endpoint command drops more endpoints than it adds
21522cf95c18SSarah Sharp * - a second configure endpoint command that adds more endpoints is queued
21532cf95c18SSarah Sharp * - the first configure endpoint command fails, so the config is unchanged
21542cf95c18SSarah Sharp * - the second command may succeed, even though there aren't enough resources
21552cf95c18SSarah Sharp *
21562cf95c18SSarah Sharp * Must be called with xhci->lock held.
21572cf95c18SSarah Sharp */
21582cf95c18SSarah Sharp static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
215992f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx)
21602cf95c18SSarah Sharp {
21612cf95c18SSarah Sharp u32 added_eps;
21622cf95c18SSarah Sharp
216392f8e767SSarah Sharp added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
21642cf95c18SSarah Sharp if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
21654bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
21664bdfe4c3SXenia Ragiadakou "Not enough ep ctxs: %u active, need to add %u, limit is %u.",
21682cf95c18SSarah Sharp xhci->num_active_eps, added_eps,
21692cf95c18SSarah Sharp xhci->limit_active_eps);
21702cf95c18SSarah Sharp return -ENOMEM;
21712cf95c18SSarah Sharp }
21722cf95c18SSarah Sharp xhci->num_active_eps += added_eps;
21734bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
21744bdfe4c3SXenia Ragiadakou "Adding %u ep ctxs, %u now active.", added_eps,
21752cf95c18SSarah Sharp xhci->num_active_eps);
21762cf95c18SSarah Sharp return 0;
21772cf95c18SSarah Sharp }
21782cf95c18SSarah Sharp
21792cf95c18SSarah Sharp /*
21802cf95c18SSarah Sharp * The configure endpoint command was failed by the xHC for some other reason,
21812cf95c18SSarah Sharp * so we need to revert the resources that the failed configuration would have used.
21822cf95c18SSarah Sharp *
21832cf95c18SSarah Sharp * Must be called with xhci->lock held.
21842cf95c18SSarah Sharp */
21852cf95c18SSarah Sharp static void xhci_free_host_resources(struct xhci_hcd *xhci,
218692f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx)
21872cf95c18SSarah Sharp {
21882cf95c18SSarah Sharp u32 num_failed_eps;
21892cf95c18SSarah Sharp
219092f8e767SSarah Sharp num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
21912cf95c18SSarah Sharp xhci->num_active_eps -= num_failed_eps;
21924bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
21934bdfe4c3SXenia Ragiadakou "Removing %u failed ep ctxs, %u now active.",
21942cf95c18SSarah Sharp num_failed_eps,
21952cf95c18SSarah Sharp xhci->num_active_eps);
21962cf95c18SSarah Sharp }
21972cf95c18SSarah Sharp
21982cf95c18SSarah Sharp /*
21992cf95c18SSarah Sharp * Now that the command has completed, clean up the active endpoint count by
22002cf95c18SSarah Sharp * subtracting out the endpoints that were dropped (but not changed).
22012cf95c18SSarah Sharp *
22022cf95c18SSarah Sharp * Must be called with xhci->lock held.
22032cf95c18SSarah Sharp */
22042cf95c18SSarah Sharp static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
220592f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx)
22062cf95c18SSarah Sharp {
22072cf95c18SSarah Sharp u32 num_dropped_eps;
22082cf95c18SSarah Sharp
220992f8e767SSarah Sharp num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
22102cf95c18SSarah Sharp xhci->num_active_eps -= num_dropped_eps;
22112cf95c18SSarah Sharp if (num_dropped_eps)
22124bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
22134bdfe4c3SXenia Ragiadakou "Removing %u dropped ep ctxs, %u now active.",
22142cf95c18SSarah Sharp num_dropped_eps,
22152cf95c18SSarah Sharp xhci->num_active_eps);
22162cf95c18SSarah Sharp }
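/*
 * How the three helpers above fit together around a configure endpoint
 * command, as a trimmed sketch (xhci_configure_endpoint() below is the
 * real caller; locking and error paths are abbreviated here):
 */
#if 0
	spin_lock_irqsave(&xhci->lock, flags);
	/* Charge the added endpoints up front so a second queued command
	 * cannot oversubscribe the host while this one is in flight.
	 */
	if (xhci_reserve_host_resources(xhci, ctrl_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}
	/* ... queue the command, unlock, wait for completion ... */
	spin_lock_irqsave(&xhci->lock, flags);
	if (ret)
		/* Command failed: give back everything we reserved. */
		xhci_free_host_resources(xhci, ctrl_ctx);
	else
		/* Command worked: now subtract the dropped endpoints. */
		xhci_finish_resource_reservation(xhci, ctrl_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);
#endif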
22172cf95c18SSarah Sharp
2218ed384bd3SFelipe Balbi static unsigned int xhci_get_block_size(struct usb_device *udev)
2219c29eea62SSarah Sharp {
2220c29eea62SSarah Sharp switch (udev->speed) {
2221c29eea62SSarah Sharp case USB_SPEED_LOW:
2222c29eea62SSarah Sharp case USB_SPEED_FULL:
2223c29eea62SSarah Sharp return FS_BLOCK;
2224c29eea62SSarah Sharp case USB_SPEED_HIGH:
2225c29eea62SSarah Sharp return HS_BLOCK;
2226c29eea62SSarah Sharp case USB_SPEED_SUPER:
22270caf6b33SMathias Nyman case USB_SPEED_SUPER_PLUS:
2228c29eea62SSarah Sharp return SS_BLOCK;
2229c29eea62SSarah Sharp case USB_SPEED_UNKNOWN:
2230c29eea62SSarah Sharp default:
2231c29eea62SSarah Sharp /* Should never happen */
2232c29eea62SSarah Sharp return 1;
2233c29eea62SSarah Sharp }
2234c29eea62SSarah Sharp }
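/*
 * Example use of the helper above: later bandwidth math is done in whole
 * hardware blocks, so a max packet size is converted with, e.g.,
 *
 *	DIV_ROUND_UP(virt_ep->bw_info.max_packet_size,
 *		     xhci_get_block_size(virt_dev->udev));
 *
 * rounding any partial block up (the per-speed block sizes are defined
 * with the driver's other bandwidth constants).
 */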
2235c29eea62SSarah Sharp
2236ed384bd3SFelipe Balbi static unsigned int
2237ed384bd3SFelipe Balbi xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2238c29eea62SSarah Sharp {
2239c29eea62SSarah Sharp if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2240c29eea62SSarah Sharp return LS_OVERHEAD;
2241c29eea62SSarah Sharp if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2242c29eea62SSarah Sharp return FS_OVERHEAD;
2243c29eea62SSarah Sharp return HS_OVERHEAD;
2244c29eea62SSarah Sharp }
2245c29eea62SSarah Sharp
2246c29eea62SSarah Sharp /* If we are changing a LS/FS device under a HS hub,
2247c29eea62SSarah Sharp * make sure (if we are activating a new TT) that the HS bus has enough
2248c29eea62SSarah Sharp * bandwidth for this new TT.
2249c29eea62SSarah Sharp */
2250c29eea62SSarah Sharp static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2251c29eea62SSarah Sharp struct xhci_virt_device *virt_dev,
2252c29eea62SSarah Sharp int old_active_eps)
2253c29eea62SSarah Sharp {
2254c29eea62SSarah Sharp struct xhci_interval_bw_table *bw_table;
2255c29eea62SSarah Sharp struct xhci_tt_bw_info *tt_info;
2256c29eea62SSarah Sharp
2257c29eea62SSarah Sharp /* Find the bandwidth table for the root port this TT is attached to. */
2258c29eea62SSarah Sharp bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2259c29eea62SSarah Sharp tt_info = virt_dev->tt_info;
2260c29eea62SSarah Sharp /* If this TT already had active endpoints, the bandwidth for this TT
2261c29eea62SSarah Sharp * has already been added. Removing all periodic endpoints (and thus
2262c29eea62SSarah Sharp * making the TT inactive) will only decrease the bandwidth used.
2263c29eea62SSarah Sharp */
2264c29eea62SSarah Sharp if (old_active_eps)
2265c29eea62SSarah Sharp return 0;
2266c29eea62SSarah Sharp if (old_active_eps == 0 && tt_info->active_eps != 0) {
2267c29eea62SSarah Sharp if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2268c29eea62SSarah Sharp return -ENOMEM;
2269c29eea62SSarah Sharp return 0;
2270c29eea62SSarah Sharp }
2271c29eea62SSarah Sharp /* Not sure why we would have no new active endpoints...
2272c29eea62SSarah Sharp *
2273c29eea62SSarah Sharp * Maybe because of an Evaluate Context change for a hub update or a
2274c29eea62SSarah Sharp * control endpoint 0 max packet size change?
2275c29eea62SSarah Sharp * FIXME: skip the bandwidth calculation in that case.
2276c29eea62SSarah Sharp */
2277c29eea62SSarah Sharp return 0;
2278c29eea62SSarah Sharp }
2279c29eea62SSarah Sharp
22802b698999SSarah Sharp static int xhci_check_ss_bw(struct xhci_hcd *xhci,
22812b698999SSarah Sharp struct xhci_virt_device *virt_dev)
22822b698999SSarah Sharp {
22832b698999SSarah Sharp unsigned int bw_reserved;
22842b698999SSarah Sharp
22852b698999SSarah Sharp bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
22862b698999SSarah Sharp if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
22872b698999SSarah Sharp return -ENOMEM;
22882b698999SSarah Sharp
22892b698999SSarah Sharp bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
22902b698999SSarah Sharp if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
22912b698999SSarah Sharp return -ENOMEM;
22922b698999SSarah Sharp
22932b698999SSarah Sharp return 0;
22942b698999SSarah Sharp }
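/*
 * Numeric sketch of the check above: SS_BW_RESERVED is a percentage (10
 * would reserve a tenth of the bus), so a new periodic IN endpoint is
 * only admitted while
 *
 *	ss_bw_in <= SS_BW_LIMIT_IN -
 *		    DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100)
 *
 * i.e. while periodic traffic stays under the unreserved share of the IN
 * budget; OUT is checked the same way against SS_BW_LIMIT_OUT.
 */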
22952b698999SSarah Sharp
2296c29eea62SSarah Sharp /*
2297c29eea62SSarah Sharp * This algorithm is a very conservative estimate of the worst-case scheduling
2298c29eea62SSarah Sharp * scenario for any one interval. The hardware dynamically schedules the
2299c29eea62SSarah Sharp * packets, so we can't tell which microframe could be the limiting factor in
2300c29eea62SSarah Sharp * the bandwidth scheduling. This only takes into account periodic endpoints.
2301c29eea62SSarah Sharp *
2302c29eea62SSarah Sharp * Obviously, we can't solve an NP-complete problem to find the minimum worst
2303c29eea62SSarah Sharp * case scenario. Instead, we come up with an estimate that is no less than
2304c29eea62SSarah Sharp * the worst case bandwidth used for any one microframe, but may be an
2305c29eea62SSarah Sharp * over-estimate.
2306c29eea62SSarah Sharp *
2307c29eea62SSarah Sharp * We walk the requirements for each endpoint by interval, starting with the
2308c29eea62SSarah Sharp * smallest interval, and place packets in the schedule where there is only one
2309c29eea62SSarah Sharp * possible way to schedule packets for that interval. In order to simplify
2310c29eea62SSarah Sharp * this algorithm, we record the largest max packet size for each interval, and
2311c29eea62SSarah Sharp * assume all packets will be that size.
2312c29eea62SSarah Sharp *
2313c29eea62SSarah Sharp * For interval 0, we obviously must schedule all packets in every microframe.
2314c29eea62SSarah Sharp * The bandwidth for interval 0 is just the amount of data to be transmitted
2315c29eea62SSarah Sharp * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2316c29eea62SSarah Sharp * the number of packets).
2317c29eea62SSarah Sharp *
2318c29eea62SSarah Sharp * For interval 1, we have two possible microframes to schedule those packets
2319c29eea62SSarah Sharp * in. For this algorithm, if we can schedule the same number of packets for
2320c29eea62SSarah Sharp * each possible scheduling opportunity (each microframe), we will do so. The
2321c29eea62SSarah Sharp * remaining number of packets will be saved to be transmitted in the gaps in
2322c29eea62SSarah Sharp * the next interval's scheduling sequence.
2323c29eea62SSarah Sharp *
2324c29eea62SSarah Sharp * As we move those remaining packets to be scheduled with interval 2 packets,
2325c29eea62SSarah Sharp * we have to double the number of remaining packets to transmit. This is
2326c29eea62SSarah Sharp * because the intervals are actually powers of 2, and we would be transmitting
2327c29eea62SSarah Sharp * the previous interval's packets twice in this interval. We also have to be
2328c29eea62SSarah Sharp * sure that when we look at the largest max packet size for this interval, we
2329c29eea62SSarah Sharp * also look at the largest max packet size for the remaining packets and take
2330c29eea62SSarah Sharp * the greater of the two.
2331c29eea62SSarah Sharp *
2332c29eea62SSarah Sharp * The algorithm continues to evenly distribute packets in each scheduling
2333c29eea62SSarah Sharp * opportunity, and push the remaining packets out, until we get to the last
2334c29eea62SSarah Sharp * interval. Then those packets and their associated overhead are just added
2335c29eea62SSarah Sharp * to the bandwidth used.
23362e27980eSSarah Sharp */
23372e27980eSSarah Sharp static int xhci_check_bw_table(struct xhci_hcd *xhci,
23382e27980eSSarah Sharp struct xhci_virt_device *virt_dev,
23392e27980eSSarah Sharp int old_active_eps)
23402e27980eSSarah Sharp {
2341c29eea62SSarah Sharp unsigned int bw_reserved;
2342c29eea62SSarah Sharp unsigned int max_bandwidth;
2343c29eea62SSarah Sharp unsigned int bw_used;
2344c29eea62SSarah Sharp unsigned int block_size;
2345c29eea62SSarah Sharp struct xhci_interval_bw_table *bw_table;
2346c29eea62SSarah Sharp unsigned int packet_size = 0;
2347c29eea62SSarah Sharp unsigned int overhead = 0;
2348c29eea62SSarah Sharp unsigned int packets_transmitted = 0;
2349c29eea62SSarah Sharp unsigned int packets_remaining = 0;
2350c29eea62SSarah Sharp unsigned int i;
2351c29eea62SSarah Sharp
23520caf6b33SMathias Nyman if (virt_dev->udev->speed >= USB_SPEED_SUPER)
23532b698999SSarah Sharp return xhci_check_ss_bw(xhci, virt_dev);
23542b698999SSarah Sharp
2355c29eea62SSarah Sharp if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2356c29eea62SSarah Sharp max_bandwidth = HS_BW_LIMIT;
2357c29eea62SSarah Sharp /* Convert percent of bus BW reserved to blocks reserved */
2358c29eea62SSarah Sharp bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2359c29eea62SSarah Sharp } else {
2360c29eea62SSarah Sharp max_bandwidth = FS_BW_LIMIT;
2361c29eea62SSarah Sharp bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2362c29eea62SSarah Sharp }
2363c29eea62SSarah Sharp
2364c29eea62SSarah Sharp bw_table = virt_dev->bw_table;
2365c29eea62SSarah Sharp /* We need to translate the max packet size and max ESIT payloads into
2366c29eea62SSarah Sharp * the units the hardware uses.
2367c29eea62SSarah Sharp */
2368c29eea62SSarah Sharp block_size = xhci_get_block_size(virt_dev->udev);
2369c29eea62SSarah Sharp
2370c29eea62SSarah Sharp /* If we are manipulating a LS/FS device under a HS hub, double check
2371c29eea62SSarah Sharp * that the HS bus has enough bandwidth if we are activating a new TT.
2372c29eea62SSarah Sharp */
2373c29eea62SSarah Sharp if (virt_dev->tt_info) {
23744bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
23754bdfe4c3SXenia Ragiadakou "Recalculating BW for rootport %u",
2376c29eea62SSarah Sharp virt_dev->real_port);
2377c29eea62SSarah Sharp if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2378c29eea62SSarah Sharp xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
2380c29eea62SSarah Sharp return -ENOMEM;
2381c29eea62SSarah Sharp }
23824bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
23834bdfe4c3SXenia Ragiadakou "Recalculating BW for TT slot %u port %u",
2384c29eea62SSarah Sharp virt_dev->tt_info->slot_id,
2385c29eea62SSarah Sharp virt_dev->tt_info->ttport);
2386c29eea62SSarah Sharp } else {
23874bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
23884bdfe4c3SXenia Ragiadakou "Recalculating BW for rootport %u",
2389c29eea62SSarah Sharp virt_dev->real_port);
2390c29eea62SSarah Sharp }
2391c29eea62SSarah Sharp
2392c29eea62SSarah Sharp /* Add in how much bandwidth will be used for interval zero, or the
2393c29eea62SSarah Sharp * rounded max ESIT payload + number of packets * largest overhead.
2394c29eea62SSarah Sharp */
2395c29eea62SSarah Sharp bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2396c29eea62SSarah Sharp bw_table->interval_bw[0].num_packets *
2397c29eea62SSarah Sharp xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2398c29eea62SSarah Sharp
2399c29eea62SSarah Sharp for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2400c29eea62SSarah Sharp unsigned int bw_added;
2401c29eea62SSarah Sharp unsigned int largest_mps;
2402c29eea62SSarah Sharp unsigned int interval_overhead;
2403c29eea62SSarah Sharp
2404c29eea62SSarah Sharp /*
2405c29eea62SSarah Sharp * How many packets could we transmit in this interval?
2406c29eea62SSarah Sharp * If packets didn't fit in the previous interval, we will need
2407c29eea62SSarah Sharp * to transmit that many packets twice within this interval.
2408c29eea62SSarah Sharp */
2409c29eea62SSarah Sharp packets_remaining = 2 * packets_remaining +
2410c29eea62SSarah Sharp bw_table->interval_bw[i].num_packets;
2411c29eea62SSarah Sharp
2412c29eea62SSarah Sharp /* Find the largest max packet size of this or the previous
2413c29eea62SSarah Sharp * interval.
2414c29eea62SSarah Sharp */
2415c29eea62SSarah Sharp if (list_empty(&bw_table->interval_bw[i].endpoints))
2416c29eea62SSarah Sharp largest_mps = 0;
2417c29eea62SSarah Sharp else {
2418c29eea62SSarah Sharp struct xhci_virt_ep *virt_ep;
2419c29eea62SSarah Sharp struct list_head *ep_entry;
2420c29eea62SSarah Sharp
2421c29eea62SSarah Sharp ep_entry = bw_table->interval_bw[i].endpoints.next;
2422c29eea62SSarah Sharp virt_ep = list_entry(ep_entry,
2423c29eea62SSarah Sharp struct xhci_virt_ep, bw_endpoint_list);
2424c29eea62SSarah Sharp /* Convert to blocks, rounding up */
2425c29eea62SSarah Sharp largest_mps = DIV_ROUND_UP(
2426c29eea62SSarah Sharp virt_ep->bw_info.max_packet_size,
2427c29eea62SSarah Sharp block_size);
2428c29eea62SSarah Sharp }
2429c29eea62SSarah Sharp if (largest_mps > packet_size)
2430c29eea62SSarah Sharp packet_size = largest_mps;
2431c29eea62SSarah Sharp
2432c29eea62SSarah Sharp /* Use the larger overhead of this or the previous interval. */
2433c29eea62SSarah Sharp interval_overhead = xhci_get_largest_overhead(
2434c29eea62SSarah Sharp &bw_table->interval_bw[i]);
2435c29eea62SSarah Sharp if (interval_overhead > overhead)
2436c29eea62SSarah Sharp overhead = interval_overhead;
2437c29eea62SSarah Sharp
2438c29eea62SSarah Sharp /* How many packets can we evenly distribute across
2439c29eea62SSarah Sharp * (1 << (i + 1)) possible scheduling opportunities?
2440c29eea62SSarah Sharp */
2441c29eea62SSarah Sharp packets_transmitted = packets_remaining >> (i + 1);
2442c29eea62SSarah Sharp
2443c29eea62SSarah Sharp /* Add in the bandwidth used for those scheduled packets */
2444c29eea62SSarah Sharp bw_added = packets_transmitted * (overhead + packet_size);
2445c29eea62SSarah Sharp
2446c29eea62SSarah Sharp /* How many packets do we have remaining to transmit? */
2447c29eea62SSarah Sharp packets_remaining = packets_remaining % (1 << (i + 1));
2448c29eea62SSarah Sharp
2449c29eea62SSarah Sharp /* What largest max packet size should those packets have? */
2450c29eea62SSarah Sharp /* If we've transmitted all packets, don't carry over the
2451c29eea62SSarah Sharp * largest packet size.
2452c29eea62SSarah Sharp */
2453c29eea62SSarah Sharp if (packets_remaining == 0) {
2454c29eea62SSarah Sharp packet_size = 0;
2455c29eea62SSarah Sharp overhead = 0;
2456c29eea62SSarah Sharp } else if (packets_transmitted > 0) {
2457c29eea62SSarah Sharp /* Otherwise if we do have remaining packets, and we've
2458c29eea62SSarah Sharp * scheduled some packets in this interval, take the
2459c29eea62SSarah Sharp * largest max packet size from endpoints with this
2460c29eea62SSarah Sharp * interval.
2461c29eea62SSarah Sharp */
2462c29eea62SSarah Sharp packet_size = largest_mps;
2463c29eea62SSarah Sharp overhead = interval_overhead;
2464c29eea62SSarah Sharp }
2465c29eea62SSarah Sharp /* Otherwise carry over packet_size and overhead from the last
2466c29eea62SSarah Sharp * time we had a remainder.
2467c29eea62SSarah Sharp */
2468c29eea62SSarah Sharp bw_used += bw_added;
2469c29eea62SSarah Sharp if (bw_used > max_bandwidth) {
2470c29eea62SSarah Sharp xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2472c29eea62SSarah Sharp bw_used, max_bandwidth);
2473c29eea62SSarah Sharp return -ENOMEM;
2474c29eea62SSarah Sharp }
2475c29eea62SSarah Sharp }
2476c29eea62SSarah Sharp /*
2477c29eea62SSarah Sharp * Ok, we know we have some packets left over after even-handedly
2478c29eea62SSarah Sharp * scheduling interval 15. We don't know which microframes they will
2479c29eea62SSarah Sharp * fit into, so we over-schedule and say they will be scheduled every
2480c29eea62SSarah Sharp * microframe.
2481c29eea62SSarah Sharp */
2482c29eea62SSarah Sharp if (packets_remaining > 0)
2483c29eea62SSarah Sharp bw_used += overhead + packet_size;
2484c29eea62SSarah Sharp
2485c29eea62SSarah Sharp if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2486c29eea62SSarah Sharp unsigned int port_index = virt_dev->real_port - 1;
2487c29eea62SSarah Sharp
2488c29eea62SSarah Sharp /* OK, we're manipulating a HS device attached to a
2489c29eea62SSarah Sharp * root port bandwidth domain. Include the number of active TTs
2490c29eea62SSarah Sharp * in the bandwidth used.
2491c29eea62SSarah Sharp */
2492c29eea62SSarah Sharp bw_used += TT_HS_OVERHEAD *
2493c29eea62SSarah Sharp xhci->rh_bw[port_index].num_active_tts;
2494c29eea62SSarah Sharp }
2495c29eea62SSarah Sharp
24964bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
24974bdfe4c3SXenia Ragiadakou "Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
2499c29eea62SSarah Sharp bw_used, max_bandwidth, bw_reserved,
2500c29eea62SSarah Sharp (max_bandwidth - bw_used - bw_reserved) * 100 /
2501c29eea62SSarah Sharp max_bandwidth);
2502c29eea62SSarah Sharp
2503c29eea62SSarah Sharp bw_used += bw_reserved;
2504c29eea62SSarah Sharp if (bw_used > max_bandwidth) {
2505c29eea62SSarah Sharp xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2506c29eea62SSarah Sharp bw_used, max_bandwidth);
2507c29eea62SSarah Sharp return -ENOMEM;
2508c29eea62SSarah Sharp }
2509c29eea62SSarah Sharp
2510c29eea62SSarah Sharp bw_table->bw_used = bw_used;
25112e27980eSSarah Sharp return 0;
25122e27980eSSarah Sharp }
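/*
 * The interval loop above, re-stated as a self-contained sketch with the
 * driver types stripped away (the array names and the fixed 16 intervals
 * here are illustrative, not driver API):
 */
#if 0
static unsigned int bw_estimate(const unsigned int num_packets[16],
				const unsigned int mps_of[16],
				const unsigned int overhead_of[16])
{
	unsigned int bw_used = 0, remaining = 0, mps = 0, overhead = 0;
	unsigned int i, transmitted;

	for (i = 1; i < 16; i++) {
		/* Leftovers from interval i-1 must be sent twice as often
		 * once they slip into interval i.
		 */
		remaining = 2 * remaining + num_packets[i];
		if (mps_of[i] > mps)
			mps = mps_of[i];
		if (overhead_of[i] > overhead)
			overhead = overhead_of[i];

		/* Spread evenly across the 1 << (i + 1) service slots... */
		transmitted = remaining >> (i + 1);
		bw_used += transmitted * (overhead + mps);

		/* ...and push the remainder out to the next interval. */
		remaining %= 1 << (i + 1);
		if (remaining == 0) {
			mps = 0;
			overhead = 0;
		} else if (transmitted > 0) {
			mps = mps_of[i];
			overhead = overhead_of[i];
		}
	}
	/* Anything still left is pessimistically charged every microframe. */
	if (remaining > 0)
		bw_used += overhead + mps;
	return bw_used;
}
#endif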
25132e27980eSSarah Sharp
25142e27980eSSarah Sharp static bool xhci_is_async_ep(unsigned int ep_type)
25152e27980eSSarah Sharp {
25162e27980eSSarah Sharp return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
25172e27980eSSarah Sharp ep_type != ISOC_IN_EP &&
25182e27980eSSarah Sharp ep_type != INT_IN_EP);
25192e27980eSSarah Sharp }
25202e27980eSSarah Sharp
25212b698999SSarah Sharp static bool xhci_is_sync_in_ep(unsigned int ep_type)
25222b698999SSarah Sharp {
2523392a07aeSSarah Sharp return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
25242b698999SSarah Sharp }
25252b698999SSarah Sharp
25262b698999SSarah Sharp static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
25272b698999SSarah Sharp {
25282b698999SSarah Sharp unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
25292b698999SSarah Sharp
25302b698999SSarah Sharp if (ep_bw->ep_interval == 0)
25312b698999SSarah Sharp return SS_OVERHEAD_BURST +
25322b698999SSarah Sharp (ep_bw->mult * ep_bw->num_packets *
25332b698999SSarah Sharp (SS_OVERHEAD + mps));
25342b698999SSarah Sharp return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
25352b698999SSarah Sharp (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
25362b698999SSarah Sharp 1 << ep_bw->ep_interval);
25372b698999SSarah Sharp
25382b698999SSarah Sharp }
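/*
 * The same computation in formula form (mps already converted to blocks):
 *
 *	interval 0:	SS_OVERHEAD_BURST + mult * num_packets * (SS_OVERHEAD + mps)
 *	interval 2^k:	DIV_ROUND_UP(mult * num_packets *
 *				     (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2^k)
 *
 * so for longer intervals the cost is amortized over the 2^k microframes
 * between service opportunities.
 */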
25392b698999SSarah Sharp
25403969384cSLu Baolu static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
25412e27980eSSarah Sharp struct xhci_bw_info *ep_bw,
25422e27980eSSarah Sharp struct xhci_interval_bw_table *bw_table,
25432e27980eSSarah Sharp struct usb_device *udev,
25442e27980eSSarah Sharp struct xhci_virt_ep *virt_ep,
25452e27980eSSarah Sharp struct xhci_tt_bw_info *tt_info)
25462e27980eSSarah Sharp {
25472e27980eSSarah Sharp struct xhci_interval_bw *interval_bw;
25482e27980eSSarah Sharp int normalized_interval;
25492e27980eSSarah Sharp
25502b698999SSarah Sharp if (xhci_is_async_ep(ep_bw->type))
25512e27980eSSarah Sharp return;
25522e27980eSSarah Sharp
25530caf6b33SMathias Nyman if (udev->speed >= USB_SPEED_SUPER) {
25542b698999SSarah Sharp if (xhci_is_sync_in_ep(ep_bw->type))
25552b698999SSarah Sharp xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
25562b698999SSarah Sharp xhci_get_ss_bw_consumed(ep_bw);
25572b698999SSarah Sharp else
25582b698999SSarah Sharp xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
25592b698999SSarah Sharp xhci_get_ss_bw_consumed(ep_bw);
25602b698999SSarah Sharp return;
25612b698999SSarah Sharp }
25622b698999SSarah Sharp
25632b698999SSarah Sharp /* SuperSpeed endpoints never get added to intervals in the table, so
25642b698999SSarah Sharp * this check is only valid for HS/FS/LS devices.
25652b698999SSarah Sharp */
25662b698999SSarah Sharp if (list_empty(&virt_ep->bw_endpoint_list))
25672b698999SSarah Sharp return;
25682e27980eSSarah Sharp /* For LS/FS devices, we need to translate the interval expressed in
25692e27980eSSarah Sharp * microframes to frames.
25702e27980eSSarah Sharp */
25712e27980eSSarah Sharp if (udev->speed == USB_SPEED_HIGH)
25722e27980eSSarah Sharp normalized_interval = ep_bw->ep_interval;
25732e27980eSSarah Sharp else
25742e27980eSSarah Sharp normalized_interval = ep_bw->ep_interval - 3;
25752e27980eSSarah Sharp
25762e27980eSSarah Sharp if (normalized_interval == 0)
25772e27980eSSarah Sharp bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
25782e27980eSSarah Sharp interval_bw = &bw_table->interval_bw[normalized_interval];
25792e27980eSSarah Sharp interval_bw->num_packets -= ep_bw->num_packets;
25802e27980eSSarah Sharp switch (udev->speed) {
25812e27980eSSarah Sharp case USB_SPEED_LOW:
25822e27980eSSarah Sharp interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
25832e27980eSSarah Sharp break;
25842e27980eSSarah Sharp case USB_SPEED_FULL:
25852e27980eSSarah Sharp interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
25862e27980eSSarah Sharp break;
25872e27980eSSarah Sharp case USB_SPEED_HIGH:
25882e27980eSSarah Sharp interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
25892e27980eSSarah Sharp break;
25901e4c5742SAlan Stern default:
25912e27980eSSarah Sharp /* Should never happen because only LS/FS/HS endpoints will get
25922e27980eSSarah Sharp * added to the endpoint list.
25932e27980eSSarah Sharp */
25942e27980eSSarah Sharp return;
25952e27980eSSarah Sharp }
25962e27980eSSarah Sharp if (tt_info)
25972e27980eSSarah Sharp tt_info->active_eps -= 1;
25982e27980eSSarah Sharp list_del_init(&virt_ep->bw_endpoint_list);
25992e27980eSSarah Sharp }
26002e27980eSSarah Sharp
26012e27980eSSarah Sharp static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
26022e27980eSSarah Sharp struct xhci_bw_info *ep_bw,
26032e27980eSSarah Sharp struct xhci_interval_bw_table *bw_table,
26042e27980eSSarah Sharp struct usb_device *udev,
26052e27980eSSarah Sharp struct xhci_virt_ep *virt_ep,
26062e27980eSSarah Sharp struct xhci_tt_bw_info *tt_info)
26072e27980eSSarah Sharp {
26082e27980eSSarah Sharp struct xhci_interval_bw *interval_bw;
26092e27980eSSarah Sharp struct xhci_virt_ep *smaller_ep;
26102e27980eSSarah Sharp int normalized_interval;
26112e27980eSSarah Sharp
26122e27980eSSarah Sharp if (xhci_is_async_ep(ep_bw->type))
26132e27980eSSarah Sharp return;
26142e27980eSSarah Sharp
26152b698999SSarah Sharp if (udev->speed >= USB_SPEED_SUPER) {
26162b698999SSarah Sharp if (xhci_is_sync_in_ep(ep_bw->type))
26172b698999SSarah Sharp xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
26182b698999SSarah Sharp xhci_get_ss_bw_consumed(ep_bw);
26192b698999SSarah Sharp else
26202b698999SSarah Sharp xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
26212b698999SSarah Sharp xhci_get_ss_bw_consumed(ep_bw);
26222b698999SSarah Sharp return;
26232b698999SSarah Sharp }
26242b698999SSarah Sharp
26252e27980eSSarah Sharp /* For LS/FS devices, we need to translate the interval expressed in
26262e27980eSSarah Sharp * microframes to frames.
26272e27980eSSarah Sharp */
26282e27980eSSarah Sharp if (udev->speed == USB_SPEED_HIGH)
26292e27980eSSarah Sharp normalized_interval = ep_bw->ep_interval;
26302e27980eSSarah Sharp else
26312e27980eSSarah Sharp normalized_interval = ep_bw->ep_interval - 3;
26322e27980eSSarah Sharp
26332e27980eSSarah Sharp if (normalized_interval == 0)
26342e27980eSSarah Sharp bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
26352e27980eSSarah Sharp interval_bw = &bw_table->interval_bw[normalized_interval];
26362e27980eSSarah Sharp interval_bw->num_packets += ep_bw->num_packets;
26372e27980eSSarah Sharp switch (udev->speed) {
26382e27980eSSarah Sharp case USB_SPEED_LOW:
26392e27980eSSarah Sharp interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
26402e27980eSSarah Sharp break;
26412e27980eSSarah Sharp case USB_SPEED_FULL:
26422e27980eSSarah Sharp interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
26432e27980eSSarah Sharp break;
26442e27980eSSarah Sharp case USB_SPEED_HIGH:
26452e27980eSSarah Sharp interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
26462e27980eSSarah Sharp break;
26471e4c5742SAlan Stern default:
26482e27980eSSarah Sharp /* Should never happen because only LS/FS/HS endpoints will get
26492e27980eSSarah Sharp * added to the endpoint list.
26502e27980eSSarah Sharp */
26512e27980eSSarah Sharp return;
26522e27980eSSarah Sharp }
26532e27980eSSarah Sharp
26542e27980eSSarah Sharp if (tt_info)
26552e27980eSSarah Sharp tt_info->active_eps += 1;
26562e27980eSSarah Sharp /* Insert the endpoint into the list, largest max packet size first. */
26572e27980eSSarah Sharp list_for_each_entry(smaller_ep, &interval_bw->endpoints,
26582e27980eSSarah Sharp bw_endpoint_list) {
26592e27980eSSarah Sharp if (ep_bw->max_packet_size >=
26602e27980eSSarah Sharp smaller_ep->bw_info.max_packet_size) {
26612e27980eSSarah Sharp /* Add the new ep before the smaller endpoint */
26622e27980eSSarah Sharp list_add_tail(&virt_ep->bw_endpoint_list,
26632e27980eSSarah Sharp &smaller_ep->bw_endpoint_list);
26642e27980eSSarah Sharp return;
26652e27980eSSarah Sharp }
26662e27980eSSarah Sharp }
26672e27980eSSarah Sharp /* Add the new endpoint at the end of the list. */
26682e27980eSSarah Sharp list_add_tail(&virt_ep->bw_endpoint_list,
26692e27980eSSarah Sharp &interval_bw->endpoints);
26702e27980eSSarah Sharp }
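/*
 * The insertion above keeps each interval's endpoint list sorted by
 * max_packet_size, largest first, so xhci_check_bw_table() can take the
 * worst-case packet size for an interval straight from the list head.
 * E.g. inserting endpoints with max packet sizes 512, 1024 and 64, in
 * that order, yields the list 1024 -> 512 -> 64.
 */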
26712e27980eSSarah Sharp
26722e27980eSSarah Sharp void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
26732e27980eSSarah Sharp struct xhci_virt_device *virt_dev,
26742e27980eSSarah Sharp int old_active_eps)
26752e27980eSSarah Sharp {
26762e27980eSSarah Sharp struct xhci_root_port_bw_info *rh_bw_info;
26772e27980eSSarah Sharp if (!virt_dev->tt_info)
26782e27980eSSarah Sharp return;
26792e27980eSSarah Sharp
26802e27980eSSarah Sharp rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
26812e27980eSSarah Sharp if (old_active_eps == 0 &&
26822e27980eSSarah Sharp virt_dev->tt_info->active_eps != 0) {
26832e27980eSSarah Sharp rh_bw_info->num_active_tts += 1;
2684c29eea62SSarah Sharp rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
26852e27980eSSarah Sharp } else if (old_active_eps != 0 &&
26862e27980eSSarah Sharp virt_dev->tt_info->active_eps == 0) {
26872e27980eSSarah Sharp rh_bw_info->num_active_tts -= 1;
2688c29eea62SSarah Sharp rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
26892e27980eSSarah Sharp }
26902e27980eSSarah Sharp }
26912e27980eSSarah Sharp
26922e27980eSSarah Sharp static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
26932e27980eSSarah Sharp struct xhci_virt_device *virt_dev,
26942e27980eSSarah Sharp struct xhci_container_ctx *in_ctx)
26952e27980eSSarah Sharp {
26962e27980eSSarah Sharp struct xhci_bw_info ep_bw_info[31];
26972e27980eSSarah Sharp int i;
26982e27980eSSarah Sharp struct xhci_input_control_ctx *ctrl_ctx;
26992e27980eSSarah Sharp int old_active_eps = 0;
27002e27980eSSarah Sharp
27012e27980eSSarah Sharp if (virt_dev->tt_info)
27022e27980eSSarah Sharp old_active_eps = virt_dev->tt_info->active_eps;
27032e27980eSSarah Sharp
27044daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
270592f8e767SSarah Sharp if (!ctrl_ctx) {
270692f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
270792f8e767SSarah Sharp __func__);
270892f8e767SSarah Sharp return -ENOMEM;
270992f8e767SSarah Sharp }
27102e27980eSSarah Sharp
27112e27980eSSarah Sharp for (i = 0; i < 31; i++) {
27122e27980eSSarah Sharp if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
27132e27980eSSarah Sharp continue;
27142e27980eSSarah Sharp
27152e27980eSSarah Sharp /* Make a copy of the BW info in case we need to revert this */
27162e27980eSSarah Sharp memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
27172e27980eSSarah Sharp sizeof(ep_bw_info[i]));
27182e27980eSSarah Sharp /* Drop the endpoint from the interval table if the endpoint is
27192e27980eSSarah Sharp * being dropped or changed.
27202e27980eSSarah Sharp */
27212e27980eSSarah Sharp if (EP_IS_DROPPED(ctrl_ctx, i))
27222e27980eSSarah Sharp xhci_drop_ep_from_interval_table(xhci,
27232e27980eSSarah Sharp &virt_dev->eps[i].bw_info,
27242e27980eSSarah Sharp virt_dev->bw_table,
27252e27980eSSarah Sharp virt_dev->udev,
27262e27980eSSarah Sharp &virt_dev->eps[i],
27272e27980eSSarah Sharp virt_dev->tt_info);
27282e27980eSSarah Sharp }
27292e27980eSSarah Sharp /* Overwrite the information stored in the endpoints' bw_info */
27302e27980eSSarah Sharp xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
27312e27980eSSarah Sharp for (i = 0; i < 31; i++) {
27322e27980eSSarah Sharp /* Add any changed or added endpoints to the interval table */
27332e27980eSSarah Sharp if (EP_IS_ADDED(ctrl_ctx, i))
27342e27980eSSarah Sharp xhci_add_ep_to_interval_table(xhci,
27352e27980eSSarah Sharp &virt_dev->eps[i].bw_info,
27362e27980eSSarah Sharp virt_dev->bw_table,
27372e27980eSSarah Sharp virt_dev->udev,
27382e27980eSSarah Sharp &virt_dev->eps[i],
27392e27980eSSarah Sharp virt_dev->tt_info);
27402e27980eSSarah Sharp }
27412e27980eSSarah Sharp
27422e27980eSSarah Sharp if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
27432e27980eSSarah Sharp /* Ok, this fits in the bandwidth we have.
27442e27980eSSarah Sharp * Update the number of active TTs.
27452e27980eSSarah Sharp */
27462e27980eSSarah Sharp xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
27472e27980eSSarah Sharp return 0;
27482e27980eSSarah Sharp }
27492e27980eSSarah Sharp
27502e27980eSSarah Sharp /* We don't have enough bandwidth for this, revert the stored info. */
27512e27980eSSarah Sharp for (i = 0; i < 31; i++) {
27522e27980eSSarah Sharp if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
27532e27980eSSarah Sharp continue;
27542e27980eSSarah Sharp
27552e27980eSSarah Sharp /* Drop the new copies of any added or changed endpoints from
27562e27980eSSarah Sharp * the interval table.
27572e27980eSSarah Sharp */
27582e27980eSSarah Sharp if (EP_IS_ADDED(ctrl_ctx, i)) {
27592e27980eSSarah Sharp xhci_drop_ep_from_interval_table(xhci,
27602e27980eSSarah Sharp &virt_dev->eps[i].bw_info,
27612e27980eSSarah Sharp virt_dev->bw_table,
27622e27980eSSarah Sharp virt_dev->udev,
27632e27980eSSarah Sharp &virt_dev->eps[i],
27642e27980eSSarah Sharp virt_dev->tt_info);
27652e27980eSSarah Sharp }
27662e27980eSSarah Sharp /* Revert the endpoint back to its old information */
27672e27980eSSarah Sharp memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
27682e27980eSSarah Sharp sizeof(ep_bw_info[i]));
27692e27980eSSarah Sharp /* Add any changed or dropped endpoints back into the table */
27702e27980eSSarah Sharp if (EP_IS_DROPPED(ctrl_ctx, i))
27712e27980eSSarah Sharp xhci_add_ep_to_interval_table(xhci,
27722e27980eSSarah Sharp &virt_dev->eps[i].bw_info,
27732e27980eSSarah Sharp virt_dev->bw_table,
27742e27980eSSarah Sharp virt_dev->udev,
27752e27980eSSarah Sharp &virt_dev->eps[i],
27762e27980eSSarah Sharp virt_dev->tt_info);
27772e27980eSSarah Sharp }
27782e27980eSSarah Sharp return -ENOMEM;
27792e27980eSSarah Sharp }
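/*
 * Note that the revert path above relies on drop and add being exact
 * inverses: every endpoint the first pass dropped is re-added from the
 * saved ep_bw_info[] copy, and every endpoint it added is dropped again,
 * so a failed reservation leaves the interval tables exactly as it found
 * them.
 */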
27802e27980eSSarah Sharp
27812e27980eSSarah Sharp
2782bc75fa38SAlex Chiang /* Issue a configure endpoint command or evaluate context command
2783bc75fa38SAlex Chiang * and wait for it to finish.
2784bc75fa38SAlex Chiang */
2785bc75fa38SAlex Chiang static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2786bc75fa38SAlex Chiang struct usb_device *udev,
2787bc75fa38SAlex Chiang struct xhci_command *command,
2788bc75fa38SAlex Chiang bool ctx_change, bool must_succeed)
2789bc75fa38SAlex Chiang {
2790bc75fa38SAlex Chiang int ret;
2791bc75fa38SAlex Chiang unsigned long flags;
279292f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx;
2793bc75fa38SAlex Chiang struct xhci_virt_device *virt_dev;
2794e3a78ff0SMathias Nyman struct xhci_slot_ctx *slot_ctx;
2795ddba5cd0SMathias Nyman
2796ddba5cd0SMathias Nyman if (!command)
2797ddba5cd0SMathias Nyman return -EINVAL;
2798bc75fa38SAlex Chiang
2799bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
2800d9f11ba9SMathias Nyman
2801d9f11ba9SMathias Nyman if (xhci->xhc_state & XHCI_STATE_DYING) {
2802d9f11ba9SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
2803d9f11ba9SMathias Nyman return -ESHUTDOWN;
2804d9f11ba9SMathias Nyman }
2805d9f11ba9SMathias Nyman
2806bc75fa38SAlex Chiang virt_dev = xhci->devs[udev->slot_id];
2807750645f8SSarah Sharp
28084daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
280992f8e767SSarah Sharp if (!ctrl_ctx) {
28101f21569cSEmil Goode spin_unlock_irqrestore(&xhci->lock, flags);
281192f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
281292f8e767SSarah Sharp __func__);
281392f8e767SSarah Sharp return -ENOMEM;
281492f8e767SSarah Sharp }
2815750645f8SSarah Sharp
28162cf95c18SSarah Sharp if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
281792f8e767SSarah Sharp xhci_reserve_host_resources(xhci, ctrl_ctx)) {
28182cf95c18SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
28192cf95c18SSarah Sharp xhci_warn(xhci, "Not enough host resources, "
28202cf95c18SSarah Sharp "active endpoint contexts = %u\n",
28212cf95c18SSarah Sharp xhci->num_active_eps);
28222cf95c18SSarah Sharp return -ENOMEM;
28232cf95c18SSarah Sharp }
28246b99de30SMathias Nyman if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
2825ddba5cd0SMathias Nyman xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
28262e27980eSSarah Sharp if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
282792f8e767SSarah Sharp xhci_free_host_resources(xhci, ctrl_ctx);
28282e27980eSSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
28292e27980eSSarah Sharp xhci_warn(xhci, "Not enough bandwidth\n");
28302e27980eSSarah Sharp return -ENOMEM;
28312e27980eSSarah Sharp }
28322cf95c18SSarah Sharp
2833e3a78ff0SMathias Nyman slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
283490d6d573SMathias Nyman
283590d6d573SMathias Nyman trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2836e3a78ff0SMathias Nyman trace_xhci_configure_endpoint(slot_ctx);
2837e3a78ff0SMathias Nyman
2838bc75fa38SAlex Chiang if (!ctx_change)
2839ddba5cd0SMathias Nyman ret = xhci_queue_configure_endpoint(xhci, command,
2840ddba5cd0SMathias Nyman command->in_ctx->dma,
2841bc75fa38SAlex Chiang udev->slot_id, must_succeed);
2842bc75fa38SAlex Chiang else
2843ddba5cd0SMathias Nyman ret = xhci_queue_evaluate_context(xhci, command,
2844ddba5cd0SMathias Nyman command->in_ctx->dma,
28454b266541SSarah Sharp udev->slot_id, must_succeed);
2846bc75fa38SAlex Chiang if (ret < 0) {
28472cf95c18SSarah Sharp if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
284892f8e767SSarah Sharp xhci_free_host_resources(xhci, ctrl_ctx);
2849bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
28503a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
28513a7fa5beSXenia Ragiadakou "FIXME allocate a new ring segment");
2852bc75fa38SAlex Chiang return -ENOMEM;
2853bc75fa38SAlex Chiang }
2854bc75fa38SAlex Chiang xhci_ring_cmd_db(xhci);
2855bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
2856bc75fa38SAlex Chiang
2857bc75fa38SAlex Chiang /* Wait for the configure endpoint command to complete */
2858c311e391SMathias Nyman wait_for_completion(command->completion);
2859bc75fa38SAlex Chiang
2860bc75fa38SAlex Chiang if (!ctx_change)
2861ddba5cd0SMathias Nyman ret = xhci_configure_endpoint_result(xhci, udev,
2862ddba5cd0SMathias Nyman &command->status);
28632cf95c18SSarah Sharp else
2864ddba5cd0SMathias Nyman ret = xhci_evaluate_context_result(xhci, udev,
2865ddba5cd0SMathias Nyman &command->status);
28662cf95c18SSarah Sharp
28672cf95c18SSarah Sharp if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
28682cf95c18SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
28692cf95c18SSarah Sharp /* If the command failed, remove the reserved resources.
28702cf95c18SSarah Sharp * Otherwise, clean up the estimate to include dropped eps.
28712cf95c18SSarah Sharp */
28722cf95c18SSarah Sharp if (ret)
287392f8e767SSarah Sharp xhci_free_host_resources(xhci, ctrl_ctx);
28742cf95c18SSarah Sharp else
287592f8e767SSarah Sharp xhci_finish_resource_reservation(xhci, ctrl_ctx);
28762cf95c18SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
28772cf95c18SSarah Sharp }
28782cf95c18SSarah Sharp return ret;
2879bc75fa38SAlex Chiang }
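/*
 * Minimal caller sketch for xhci_configure_endpoint() (abbreviated;
 * xhci_check_bandwidth() below is a full-featured caller):
 */
#if 0
	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;
	command->in_ctx = virt_dev->in_ctx;	/* flags already set up */

	ret = xhci_configure_endpoint(xhci, udev, command,
				      false,	/* configure, not evaluate */
				      false);	/* allowed to fail */

	kfree(command->completion);
	kfree(command);
#endif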
2880bc75fa38SAlex Chiang
2881df613834SHans de Goede static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2882df613834SHans de Goede struct xhci_virt_device *vdev, int i)
2883df613834SHans de Goede {
2884df613834SHans de Goede struct xhci_virt_ep *ep = &vdev->eps[i];
2885df613834SHans de Goede
2886df613834SHans de Goede if (ep->ep_state & EP_HAS_STREAMS) {
2887df613834SHans de Goede xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2888df613834SHans de Goede xhci_get_endpoint_address(i));
2889df613834SHans de Goede xhci_free_stream_info(xhci, ep->stream_info);
2890df613834SHans de Goede ep->stream_info = NULL;
2891df613834SHans de Goede ep->ep_state &= ~EP_HAS_STREAMS;
2892df613834SHans de Goede }
2893df613834SHans de Goede }
2894df613834SHans de Goede
2895bc75fa38SAlex Chiang /* Called after one or more calls to xhci_add_endpoint() or
2896bc75fa38SAlex Chiang * xhci_drop_endpoint(). If this call fails, the USB core is expected
2897bc75fa38SAlex Chiang * to call xhci_reset_bandwidth().
2898bc75fa38SAlex Chiang *
2899bc75fa38SAlex Chiang * Since we are in the middle of changing either configuration or
2900bc75fa38SAlex Chiang * installing a new alt setting, the USB core won't allow URBs to be
2901bc75fa38SAlex Chiang * enqueued for any endpoint on the old config or interface. Nothing
2902bc75fa38SAlex Chiang * else should be touching the xhci->devs[slot_id] structure, so we
2903bc75fa38SAlex Chiang * don't need to take the xhci->lock for manipulating that.
2904bc75fa38SAlex Chiang */
29051d69f9d9SIkjoon Jang int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2906bc75fa38SAlex Chiang {
2907bc75fa38SAlex Chiang int i;
2908bc75fa38SAlex Chiang int ret = 0;
2909bc75fa38SAlex Chiang struct xhci_hcd *xhci;
2910bc75fa38SAlex Chiang struct xhci_virt_device *virt_dev;
2911bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
2912bc75fa38SAlex Chiang struct xhci_slot_ctx *slot_ctx;
2913ddba5cd0SMathias Nyman struct xhci_command *command;
2914bc75fa38SAlex Chiang
291564927730SAndiry Xu ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2916bc75fa38SAlex Chiang if (ret <= 0)
2917bc75fa38SAlex Chiang return ret;
2918bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
291998d74f9cSMathias Nyman if ((xhci->xhc_state & XHCI_STATE_DYING) ||
292098d74f9cSMathias Nyman (xhci->xhc_state & XHCI_STATE_REMOVING))
2921fe6c6c13SSarah Sharp return -ENODEV;
2922bc75fa38SAlex Chiang
2923bc75fa38SAlex Chiang xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2924bc75fa38SAlex Chiang virt_dev = xhci->devs[udev->slot_id];
2925bc75fa38SAlex Chiang
2926103afda0SMathias Nyman command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2927ddba5cd0SMathias Nyman if (!command)
2928ddba5cd0SMathias Nyman return -ENOMEM;
2929ddba5cd0SMathias Nyman
2930ddba5cd0SMathias Nyman command->in_ctx = virt_dev->in_ctx;
2931ddba5cd0SMathias Nyman
2932bc75fa38SAlex Chiang /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
29334daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
293492f8e767SSarah Sharp if (!ctrl_ctx) {
293592f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
293692f8e767SSarah Sharp __func__);
2937ddba5cd0SMathias Nyman ret = -ENOMEM;
2938ddba5cd0SMathias Nyman goto command_cleanup;
293992f8e767SSarah Sharp }
294028ccd296SMatt Evans ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
294128ccd296SMatt Evans ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
294228ccd296SMatt Evans ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
29432dc37539SSarah Sharp
29442dc37539SSarah Sharp /* Don't issue the command if there are no endpoints to update. */
29452dc37539SSarah Sharp if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2946ddba5cd0SMathias Nyman ctrl_ctx->drop_flags == 0) {
2947ddba5cd0SMathias Nyman ret = 0;
2948ddba5cd0SMathias Nyman goto command_cleanup;
2949ddba5cd0SMathias Nyman }
2950d6759133SJulius Werner /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2951bc75fa38SAlex Chiang slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2952d6759133SJulius Werner for (i = 31; i >= 1; i--) {
2953d6759133SJulius Werner __le32 le32 = cpu_to_le32(BIT(i));
2954d6759133SJulius Werner
2955d6759133SJulius Werner if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2956d6759133SJulius Werner || (ctrl_ctx->add_flags & le32) || i == 1) {
2957d6759133SJulius Werner slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2958d6759133SJulius Werner slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2959d6759133SJulius Werner break;
2960d6759133SJulius Werner }
2961d6759133SJulius Werner }
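	/*
	 * Example: if endpoint contexts 1 (EP0) and 4 are still valid once
	 * the add/drop flags are applied, the loop above stops at i == 4
	 * and stores LAST_CTX(4), telling the xHC that endpoint contexts
	 * up to index 4 after the slot context are valid.
	 */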
2962bc75fa38SAlex Chiang
2963ddba5cd0SMathias Nyman ret = xhci_configure_endpoint(xhci, udev, command,
2964bc75fa38SAlex Chiang false, false);
2965ddba5cd0SMathias Nyman if (ret)
2966bc75fa38SAlex Chiang /* Callee should call reset_bandwidth() */
2967ddba5cd0SMathias Nyman goto command_cleanup;
2968bc75fa38SAlex Chiang
2969834cb0fcSSarah Sharp /* Free any rings that were dropped, but not changed. */
297098871e94SFelipe Balbi for (i = 1; i < 31; i++) {
29714819fef5SMatt Evans if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2972df613834SHans de Goede !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2973c5628a2aSMathias Nyman xhci_free_endpoint_ring(xhci, virt_dev, i);
2974df613834SHans de Goede xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2975df613834SHans de Goede }
2976834cb0fcSSarah Sharp }
2977bc75fa38SAlex Chiang xhci_zero_in_ctx(xhci, virt_dev);
2978834cb0fcSSarah Sharp /*
2979834cb0fcSSarah Sharp * Install any rings for completely new endpoints or changed endpoints,
2980c5628a2aSMathias Nyman * and free any old rings from changed endpoints.
2981834cb0fcSSarah Sharp */
298298871e94SFelipe Balbi for (i = 1; i < 31; i++) {
2983bc75fa38SAlex Chiang if (!virt_dev->eps[i].new_ring)
2984bc75fa38SAlex Chiang continue;
2985c5628a2aSMathias Nyman /* Only free the old ring if it exists.
2986bc75fa38SAlex Chiang * It may not if this is the first add of an endpoint.
2987bc75fa38SAlex Chiang */
2988bc75fa38SAlex Chiang if (virt_dev->eps[i].ring)
2989c5628a2aSMathias Nyman xhci_free_endpoint_ring(xhci, virt_dev, i);
2991df613834SHans de Goede xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2992bc75fa38SAlex Chiang virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2993bc75fa38SAlex Chiang virt_dev->eps[i].new_ring = NULL;
2994167657a1SMathias Nyman xhci_debugfs_create_endpoint(xhci, virt_dev, i);
2995bc75fa38SAlex Chiang }
2996ddba5cd0SMathias Nyman command_cleanup:
2997ddba5cd0SMathias Nyman kfree(command->completion);
2998ddba5cd0SMathias Nyman kfree(command);
2999bc75fa38SAlex Chiang
3000bc75fa38SAlex Chiang return ret;
3001bc75fa38SAlex Chiang }
300214295a15SChunfeng Yun EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3003bc75fa38SAlex Chiang
30041d69f9d9SIkjoon Jang void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3005bc75fa38SAlex Chiang {
3006bc75fa38SAlex Chiang struct xhci_hcd *xhci;
3007bc75fa38SAlex Chiang struct xhci_virt_device *virt_dev;
3008bc75fa38SAlex Chiang int i, ret;
3009bc75fa38SAlex Chiang
301064927730SAndiry Xu ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3011bc75fa38SAlex Chiang if (ret <= 0)
3012bc75fa38SAlex Chiang return;
3013bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
3014bc75fa38SAlex Chiang
3015bc75fa38SAlex Chiang xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3016bc75fa38SAlex Chiang virt_dev = xhci->devs[udev->slot_id];
3017bc75fa38SAlex Chiang /* Free any rings allocated for added endpoints */
301898871e94SFelipe Balbi for (i = 0; i < 31; i++) {
3019bc75fa38SAlex Chiang if (virt_dev->eps[i].new_ring) {
302002b6fdc2SLu Baolu xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3021bc75fa38SAlex Chiang xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3022bc75fa38SAlex Chiang virt_dev->eps[i].new_ring = NULL;
3023bc75fa38SAlex Chiang }
3024bc75fa38SAlex Chiang }
3025bc75fa38SAlex Chiang xhci_zero_in_ctx(xhci, virt_dev);
3026bc75fa38SAlex Chiang }
302714295a15SChunfeng Yun EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3028bc75fa38SAlex Chiang
3029bc75fa38SAlex Chiang static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3030bc75fa38SAlex Chiang struct xhci_container_ctx *in_ctx,
3031bc75fa38SAlex Chiang struct xhci_container_ctx *out_ctx,
303292f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx,
3033bc75fa38SAlex Chiang u32 add_flags, u32 drop_flags)
3034bc75fa38SAlex Chiang {
303528ccd296SMatt Evans ctrl_ctx->add_flags = cpu_to_le32(add_flags);
303628ccd296SMatt Evans ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3037bc75fa38SAlex Chiang xhci_slot_copy(xhci, in_ctx, out_ctx);
303828ccd296SMatt Evans ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3039bc75fa38SAlex Chiang }
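/*
 * Typical use of the helper above, as a sketch (ep_flag comes from
 * xhci_get_endpoint_flag() elsewhere in this file; dropping and re-adding
 * the same endpoint marks it "changed"):
 */
#if 0
	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
					   ctrl_ctx, ep_flag, ep_flag);
#endif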
3040bc75fa38SAlex Chiang
304118b74067SMathias Nyman static void xhci_endpoint_disable(struct usb_hcd *hcd,
304218b74067SMathias Nyman struct usb_host_endpoint *host_ep)
304318b74067SMathias Nyman {
304418b74067SMathias Nyman struct xhci_hcd *xhci;
304518b74067SMathias Nyman struct xhci_virt_device *vdev;
304618b74067SMathias Nyman struct xhci_virt_ep *ep;
304718b74067SMathias Nyman struct usb_device *udev;
304818b74067SMathias Nyman unsigned long flags;
304918b74067SMathias Nyman unsigned int ep_index;
305018b74067SMathias Nyman
305118b74067SMathias Nyman xhci = hcd_to_xhci(hcd);
305218b74067SMathias Nyman rescan:
305318b74067SMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
305418b74067SMathias Nyman
305518b74067SMathias Nyman udev = (struct usb_device *)host_ep->hcpriv;
305618b74067SMathias Nyman if (!udev || !udev->slot_id)
305718b74067SMathias Nyman goto done;
305818b74067SMathias Nyman
305918b74067SMathias Nyman vdev = xhci->devs[udev->slot_id];
306018b74067SMathias Nyman if (!vdev)
306118b74067SMathias Nyman goto done;
306218b74067SMathias Nyman
306318b74067SMathias Nyman ep_index = xhci_get_endpoint_index(&host_ep->desc);
306418b74067SMathias Nyman ep = &vdev->eps[ep_index];
306518b74067SMathias Nyman
306618b74067SMathias Nyman /* wait for hub_tt_work to finish clearing hub TT */
306718b74067SMathias Nyman if (ep->ep_state & EP_CLEARING_TT) {
306818b74067SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
306918b74067SMathias Nyman schedule_timeout_uninterruptible(1);
307018b74067SMathias Nyman goto rescan;
307118b74067SMathias Nyman }
307218b74067SMathias Nyman
307318b74067SMathias Nyman if (ep->ep_state)
307418b74067SMathias Nyman xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
307518b74067SMathias Nyman ep->ep_state);
307618b74067SMathias Nyman done:
307718b74067SMathias Nyman host_ep->hcpriv = NULL;
307818b74067SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
307918b74067SMathias Nyman }
308018b74067SMathias Nyman
3081f5249461SMathias Nyman /*
3082f5249461SMathias Nyman * Called after usb core issues a clear halt control message.
3083f5249461SMathias Nyman * The host side of the halt should already be cleared by a reset endpoint
3084f5249461SMathias Nyman * command issued when the STALL event was received.
3085d0167ad2SMathias Nyman *
3086f5249461SMathias Nyman * The reset endpoint command may only be issued to endpoints in the halted
3087f5249461SMathias Nyman * state. For software that wishes to reset the data toggle or sequence number
3088f5249461SMathias Nyman * of an endpoint that isn't in the halted state this function will issue a
3089f5249461SMathias Nyman * configure endpoint command with the Drop and Add bits set for the target
3090f5249461SMathias Nyman * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3091bc75fa38SAlex Chiang */
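/*
 * For reference, the usual path into this callback (assuming the current
 * usb core behavior): a driver calls usb_clear_halt(), which sends the
 * CLEAR_FEATURE(ENDPOINT_HALT) request and then calls usb_reset_endpoint(),
 * which invokes hcd->driver->endpoint_reset(), i.e. this function.
 */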
30928e71a322SMathias Nyman
30933969384cSLu Baolu static void xhci_endpoint_reset(struct usb_hcd *hcd,
3094f5249461SMathias Nyman struct usb_host_endpoint *host_ep)
3095bc75fa38SAlex Chiang {
3096bc75fa38SAlex Chiang struct xhci_hcd *xhci;
3097f5249461SMathias Nyman struct usb_device *udev;
3098f5249461SMathias Nyman struct xhci_virt_device *vdev;
3099f5249461SMathias Nyman struct xhci_virt_ep *ep;
3100f5249461SMathias Nyman struct xhci_input_control_ctx *ctrl_ctx;
3101f5249461SMathias Nyman struct xhci_command *stop_cmd, *cfg_cmd;
3102f5249461SMathias Nyman unsigned int ep_index;
3103f5249461SMathias Nyman unsigned long flags;
3104f5249461SMathias Nyman u32 ep_flag;
31058de66b0eSBill Kuzeja int err;
3106bc75fa38SAlex Chiang
3107bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
3108f5249461SMathias Nyman if (!host_ep->hcpriv)
3109f5249461SMathias Nyman return;
3110f5249461SMathias Nyman udev = (struct usb_device *) host_ep->hcpriv;
3111f5249461SMathias Nyman vdev = xhci->devs[udev->slot_id];
3112cb53c517SMathias Nyman
3113cb53c517SMathias Nyman /*
3114cb53c517SMathias Nyman * vdev may be lost due to xHC restore error and re-initialization
3115cb53c517SMathias Nyman * during S3/S4 resume. A new vdev will be allocated later by
3116cb53c517SMathias Nyman * xhci_discover_or_reset_device()
3117cb53c517SMathias Nyman */
3118cb53c517SMathias Nyman if (!udev->slot_id || !vdev)
3119cb53c517SMathias Nyman return;
3120f5249461SMathias Nyman ep_index = xhci_get_endpoint_index(&host_ep->desc);
3121f5249461SMathias Nyman ep = &vdev->eps[ep_index];
3122f5249461SMathias Nyman
3123f5249461SMathias Nyman /* Bail out if toggle is already being cleared by an endpoint reset */
3124a01ba2a3SJonathan Bell spin_lock_irqsave(&xhci->lock, flags);
3125f5249461SMathias Nyman if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3126f5249461SMathias Nyman ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3127a01ba2a3SJonathan Bell spin_unlock_irqrestore(&xhci->lock, flags);
3128f5249461SMathias Nyman return;
3129f5249461SMathias Nyman }
3130a01ba2a3SJonathan Bell spin_unlock_irqrestore(&xhci->lock, flags);
3131f5249461SMathias Nyman /* Only interrupt and bulk endpoints use data toggle, see USB2 spec 5.5.4 */
3132f5249461SMathias Nyman if (usb_endpoint_xfer_control(&host_ep->desc) ||
3133f5249461SMathias Nyman usb_endpoint_xfer_isoc(&host_ep->desc))
3134f5249461SMathias Nyman return;
3135f5249461SMathias Nyman
3136f5249461SMathias Nyman ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3137f5249461SMathias Nyman
3138f5249461SMathias Nyman if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3139f5249461SMathias Nyman return;
3140f5249461SMathias Nyman
3141f5249461SMathias Nyman stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3142f5249461SMathias Nyman if (!stop_cmd)
3143f5249461SMathias Nyman return;
3144f5249461SMathias Nyman
3145f5249461SMathias Nyman cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3146f5249461SMathias Nyman if (!cfg_cmd)
3147f5249461SMathias Nyman goto cleanup;
3148f5249461SMathias Nyman
3149f5249461SMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
3150f5249461SMathias Nyman
3151f5249461SMathias Nyman /* block queuing new trbs and ringing ep doorbell */
3152f5249461SMathias Nyman ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3153bc75fa38SAlex Chiang
3154bc75fa38SAlex Chiang /*
3155f5249461SMathias Nyman * Make sure endpoint ring is empty before resetting the toggle/seq.
3156f5249461SMathias Nyman * Driver is required to synchronously cancel all transfer requests.
3157f5249461SMathias Nyman * Stop the endpoint to force xHC to update the output context
3158bc75fa38SAlex Chiang */
3159bc75fa38SAlex Chiang
3160f5249461SMathias Nyman if (!list_empty(&ep->ring->td_list)) {
3161f5249461SMathias Nyman dev_err(&udev->dev, "EP not empty, refusing reset\n");
3162f5249461SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
3163d89b7664SZheng Xiaowei xhci_free_command(xhci, cfg_cmd);
3164f5249461SMathias Nyman goto cleanup;
3165f5249461SMathias Nyman }
31668de66b0eSBill Kuzeja
31678de66b0eSBill Kuzeja err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
31688de66b0eSBill Kuzeja ep_index, 0);
31698de66b0eSBill Kuzeja if (err < 0) {
31708de66b0eSBill Kuzeja spin_unlock_irqrestore(&xhci->lock, flags);
31718de66b0eSBill Kuzeja xhci_free_command(xhci, cfg_cmd);
31728de66b0eSBill Kuzeja xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
31738de66b0eSBill Kuzeja __func__, err);
31748de66b0eSBill Kuzeja goto cleanup;
31758de66b0eSBill Kuzeja }
31768de66b0eSBill Kuzeja
3177f5249461SMathias Nyman xhci_ring_cmd_db(xhci);
3178f5249461SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
3179f5249461SMathias Nyman
3180f5249461SMathias Nyman wait_for_completion(stop_cmd->completion);
3181f5249461SMathias Nyman
3182f5249461SMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
3183f5249461SMathias Nyman
3184f5249461SMathias Nyman /* config ep command clears toggle if add and drop ep flags are set */
3185f5249461SMathias Nyman ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3186597899d2SMathias Nyman if (!ctrl_ctx) {
3187597899d2SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
3188597899d2SMathias Nyman xhci_free_command(xhci, cfg_cmd);
3189597899d2SMathias Nyman xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3190597899d2SMathias Nyman __func__);
3191597899d2SMathias Nyman goto cleanup;
3192597899d2SMathias Nyman }
3193597899d2SMathias Nyman
3194f5249461SMathias Nyman xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3195f5249461SMathias Nyman ctrl_ctx, ep_flag, ep_flag);
3196f5249461SMathias Nyman xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3197f5249461SMathias Nyman
31988de66b0eSBill Kuzeja err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3199f5249461SMathias Nyman udev->slot_id, false);
32008de66b0eSBill Kuzeja if (err < 0) {
32018de66b0eSBill Kuzeja spin_unlock_irqrestore(&xhci->lock, flags);
32028de66b0eSBill Kuzeja xhci_free_command(xhci, cfg_cmd);
32038de66b0eSBill Kuzeja xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
32048de66b0eSBill Kuzeja __func__, err);
32058de66b0eSBill Kuzeja goto cleanup;
32068de66b0eSBill Kuzeja }
32078de66b0eSBill Kuzeja
3208f5249461SMathias Nyman xhci_ring_cmd_db(xhci);
3209f5249461SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
3210f5249461SMathias Nyman
3211f5249461SMathias Nyman wait_for_completion(cfg_cmd->completion);
3212f5249461SMathias Nyman
3213f5249461SMathias Nyman xhci_free_command(xhci, cfg_cmd);
3214f5249461SMathias Nyman cleanup:
3215f5249461SMathias Nyman xhci_free_command(xhci, stop_cmd);
3216a01ba2a3SJonathan Bell spin_lock_irqsave(&xhci->lock, flags);
3217f1ec7ae6SDing Hui if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3218f1ec7ae6SDing Hui ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3219a01ba2a3SJonathan Bell spin_unlock_irqrestore(&xhci->lock, flags);
3220bc75fa38SAlex Chiang }
3221bc75fa38SAlex Chiang
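/*
 * Check that an endpoint is a valid candidate for streams: it must have a
 * SuperSpeed endpoint companion descriptor advertising stream support, must
 * not already have streams set up (or be in the middle of getting them),
 * and must have no URBs pending on its ring.
 */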
32228df75f42SSarah Sharp static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
32238df75f42SSarah Sharp struct usb_device *udev, struct usb_host_endpoint *ep,
32248df75f42SSarah Sharp unsigned int slot_id)
32258df75f42SSarah Sharp {
32268df75f42SSarah Sharp int ret;
32278df75f42SSarah Sharp unsigned int ep_index;
32288df75f42SSarah Sharp unsigned int ep_state;
32298df75f42SSarah Sharp
32308df75f42SSarah Sharp if (!ep)
32318df75f42SSarah Sharp return -EINVAL;
323264927730SAndiry Xu ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
32338df75f42SSarah Sharp if (ret <= 0)
3234243a1dd7SHongyu Xie return ret ? ret : -EINVAL;
3235a3901538SHans de Goede if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
32368df75f42SSarah Sharp xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
32378df75f42SSarah Sharp " descriptor for ep 0x%x does not support streams\n",
32388df75f42SSarah Sharp ep->desc.bEndpointAddress);
32398df75f42SSarah Sharp return -EINVAL;
32408df75f42SSarah Sharp }
32418df75f42SSarah Sharp
32428df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&ep->desc);
32438df75f42SSarah Sharp ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
32448df75f42SSarah Sharp if (ep_state & EP_HAS_STREAMS ||
32458df75f42SSarah Sharp ep_state & EP_GETTING_STREAMS) {
32468df75f42SSarah Sharp xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
32478df75f42SSarah Sharp "already has streams set up.\n",
32488df75f42SSarah Sharp ep->desc.bEndpointAddress);
32498df75f42SSarah Sharp xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
32508df75f42SSarah Sharp "dynamic stream context array reallocation.\n");
32518df75f42SSarah Sharp return -EINVAL;
32528df75f42SSarah Sharp }
32538df75f42SSarah Sharp if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
32548df75f42SSarah Sharp xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
32558df75f42SSarah Sharp "endpoint 0x%x; URBs are pending.\n",
32568df75f42SSarah Sharp ep->desc.bEndpointAddress);
32578df75f42SSarah Sharp return -EINVAL;
32588df75f42SSarah Sharp }
32598df75f42SSarah Sharp return 0;
32608df75f42SSarah Sharp }
32618df75f42SSarah Sharp
32628df75f42SSarah Sharp static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
32638df75f42SSarah Sharp unsigned int *num_streams, unsigned int *num_stream_ctxs)
32648df75f42SSarah Sharp {
32658df75f42SSarah Sharp unsigned int max_streams;
32668df75f42SSarah Sharp
32678df75f42SSarah Sharp /* The stream context array size must be a power of two */
32688df75f42SSarah Sharp *num_stream_ctxs = roundup_pow_of_two(*num_streams);
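	/* e.g. a request for 31 streams (stream 0 included) becomes 32 entries */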
32698df75f42SSarah Sharp /*
32708df75f42SSarah Sharp * Find out how many primary stream array entries the host controller
32718df75f42SSarah Sharp * supports. Later we may use secondary stream arrays (similar to 2nd
32728df75f42SSarah Sharp * level page entries), but that's an optional feature for xHCI host
32738df75f42SSarah Sharp * controllers. xHCs must support at least 4 stream IDs.
32748df75f42SSarah Sharp */
32758df75f42SSarah Sharp max_streams = HCC_MAX_PSA(xhci->hcc_params);
32768df75f42SSarah Sharp if (*num_stream_ctxs > max_streams) {
32778df75f42SSarah Sharp xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
32788df75f42SSarah Sharp max_streams);
32798df75f42SSarah Sharp *num_stream_ctxs = max_streams;
32808df75f42SSarah Sharp *num_streams = max_streams;
32818df75f42SSarah Sharp }
32828df75f42SSarah Sharp }
32838df75f42SSarah Sharp
32848df75f42SSarah Sharp /* Returns an error code if one of the endpoints already has streams.
32858df75f42SSarah Sharp * This does not change any data structures, it only checks and gathers
32868df75f42SSarah Sharp * information.
32878df75f42SSarah Sharp */
32888df75f42SSarah Sharp static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
32898df75f42SSarah Sharp struct usb_device *udev,
32908df75f42SSarah Sharp struct usb_host_endpoint **eps, unsigned int num_eps,
32918df75f42SSarah Sharp unsigned int *num_streams, u32 *changed_ep_bitmask)
32928df75f42SSarah Sharp {
32938df75f42SSarah Sharp unsigned int max_streams;
32948df75f42SSarah Sharp unsigned int endpoint_flag;
32958df75f42SSarah Sharp int i;
32968df75f42SSarah Sharp int ret;
32978df75f42SSarah Sharp
32988df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
32998df75f42SSarah Sharp ret = xhci_check_streams_endpoint(xhci, udev,
33008df75f42SSarah Sharp eps[i], udev->slot_id);
33018df75f42SSarah Sharp if (ret < 0)
33028df75f42SSarah Sharp return ret;
33038df75f42SSarah Sharp
330418b7ede5SFelipe Balbi max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
33058df75f42SSarah Sharp if (max_streams < (*num_streams - 1)) {
33068df75f42SSarah Sharp xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
33078df75f42SSarah Sharp eps[i]->desc.bEndpointAddress,
33088df75f42SSarah Sharp max_streams);
33098df75f42SSarah Sharp *num_streams = max_streams+1;
33108df75f42SSarah Sharp }
33118df75f42SSarah Sharp
33128df75f42SSarah Sharp endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
33138df75f42SSarah Sharp if (*changed_ep_bitmask & endpoint_flag)
33148df75f42SSarah Sharp return -EINVAL;
33158df75f42SSarah Sharp *changed_ep_bitmask |= endpoint_flag;
33168df75f42SSarah Sharp }
33178df75f42SSarah Sharp return 0;
33188df75f42SSarah Sharp }
33198df75f42SSarah Sharp
33208df75f42SSarah Sharp static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
33218df75f42SSarah Sharp struct usb_device *udev,
33228df75f42SSarah Sharp struct usb_host_endpoint **eps, unsigned int num_eps)
33238df75f42SSarah Sharp {
33248df75f42SSarah Sharp u32 changed_ep_bitmask = 0;
33258df75f42SSarah Sharp unsigned int slot_id;
33268df75f42SSarah Sharp unsigned int ep_index;
33278df75f42SSarah Sharp unsigned int ep_state;
33288df75f42SSarah Sharp int i;
33298df75f42SSarah Sharp
33308df75f42SSarah Sharp slot_id = udev->slot_id;
33318df75f42SSarah Sharp if (!xhci->devs[slot_id])
33328df75f42SSarah Sharp return 0;
33338df75f42SSarah Sharp
33348df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
33358df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
33368df75f42SSarah Sharp ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
33378df75f42SSarah Sharp /* Are streams already being freed for the endpoint? */
33388df75f42SSarah Sharp if (ep_state & EP_GETTING_NO_STREAMS) {
33398df75f42SSarah Sharp xhci_warn(xhci, "WARN Can't disable streams for "
334003e64e96SJoe Perches "endpoint 0x%x, "
334103e64e96SJoe Perches "streams are being disabled already\n",
33428df75f42SSarah Sharp eps[i]->desc.bEndpointAddress);
33438df75f42SSarah Sharp return 0;
33448df75f42SSarah Sharp }
33458df75f42SSarah Sharp /* Are there actually any streams to free? */
33468df75f42SSarah Sharp if (!(ep_state & EP_HAS_STREAMS) &&
33478df75f42SSarah Sharp !(ep_state & EP_GETTING_STREAMS)) {
33488df75f42SSarah Sharp xhci_warn(xhci, "WARN Can't disable streams for "
334903e64e96SJoe Perches "endpoint 0x%x, "
335003e64e96SJoe Perches "streams are already disabled!\n",
33518df75f42SSarah Sharp eps[i]->desc.bEndpointAddress);
33528df75f42SSarah Sharp xhci_warn(xhci, "WARN xhci_free_streams() called "
33538df75f42SSarah Sharp "with non-streams endpoint\n");
33548df75f42SSarah Sharp return 0;
33558df75f42SSarah Sharp }
33568df75f42SSarah Sharp changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
33578df75f42SSarah Sharp }
33588df75f42SSarah Sharp return changed_ep_bitmask;
33598df75f42SSarah Sharp }
33608df75f42SSarah Sharp
33618df75f42SSarah Sharp /*
3362c2a298d9SLuis de Bethencourt * The USB device drivers use this function (through the HCD interface in USB
33638df75f42SSarah Sharp * core) to prepare a set of bulk endpoints to use streams. Streams are used to
33648df75f42SSarah Sharp * coordinate mass storage command queueing across multiple endpoints (basically
33658df75f42SSarah Sharp * a stream ID == a task ID).
33668df75f42SSarah Sharp *
33678df75f42SSarah Sharp * Setting up streams involves allocating the same size stream context array
33688df75f42SSarah Sharp * for each endpoint and issuing a configure endpoint command for all endpoints.
33698df75f42SSarah Sharp *
33708df75f42SSarah Sharp * Don't allow the call to succeed if one endpoint only supports one stream
33718df75f42SSarah Sharp * (which means it doesn't support streams at all).
33728df75f42SSarah Sharp *
33738df75f42SSarah Sharp * Drivers may get less stream IDs than they asked for, if the host controller
33748df75f42SSarah Sharp * hardware or endpoints claim they can't support the number of requested
33758df75f42SSarah Sharp * stream IDs.
33768df75f42SSarah Sharp */
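/*
 * A minimal usage sketch from the class-driver side (hypothetical "intf",
 * "eps" and "num_eps"; locking and error handling elided), showing how this
 * code is reached through the HCD interface:
 *
 *	int n = usb_alloc_streams(intf, eps, num_eps, 256, GFP_NOIO);
 *	if (n <= 0)
 *		return n;	(no streams - fall back to plain bulk)
 *
 * The return value is the number of usable stream IDs, possibly fewer than
 * requested; stream 0 is reserved and not counted.
 */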
33773969384cSLu Baolu static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
33788df75f42SSarah Sharp struct usb_host_endpoint **eps, unsigned int num_eps,
33798df75f42SSarah Sharp unsigned int num_streams, gfp_t mem_flags)
33808df75f42SSarah Sharp {
33818df75f42SSarah Sharp int i, ret;
33828df75f42SSarah Sharp struct xhci_hcd *xhci;
33838df75f42SSarah Sharp struct xhci_virt_device *vdev;
33848df75f42SSarah Sharp struct xhci_command *config_cmd;
338592f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx;
33868df75f42SSarah Sharp unsigned int ep_index;
33878df75f42SSarah Sharp unsigned int num_stream_ctxs;
3388f9c589e1SMathias Nyman unsigned int max_packet;
33898df75f42SSarah Sharp unsigned long flags;
33908df75f42SSarah Sharp u32 changed_ep_bitmask = 0;
33918df75f42SSarah Sharp
33928df75f42SSarah Sharp if (!eps)
33938df75f42SSarah Sharp return -EINVAL;
33948df75f42SSarah Sharp
33958df75f42SSarah Sharp /* Add one to the number of streams requested to account for
33968df75f42SSarah Sharp * stream 0 that is reserved for xHCI usage.
33978df75f42SSarah Sharp */
33988df75f42SSarah Sharp num_streams += 1;
33998df75f42SSarah Sharp xhci = hcd_to_xhci(hcd);
34008df75f42SSarah Sharp xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
34018df75f42SSarah Sharp num_streams);
34028df75f42SSarah Sharp
3403f7920884SHans de Goede /* MaxPSASize value 0 (2 streams) means streams are not supported */
34048f873c1fSHans de Goede if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
34058f873c1fSHans de Goede HCC_MAX_PSA(xhci->hcc_params) < 4) {
3406f7920884SHans de Goede xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3407f7920884SHans de Goede return -ENOSYS;
3408f7920884SHans de Goede }
3409f7920884SHans de Goede
341014d49b7aSMathias Nyman config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
341174e0b564SLu Baolu if (!config_cmd)
34128df75f42SSarah Sharp return -ENOMEM;
341374e0b564SLu Baolu
34144daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
341592f8e767SSarah Sharp if (!ctrl_ctx) {
341692f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
341792f8e767SSarah Sharp __func__);
341892f8e767SSarah Sharp xhci_free_command(xhci, config_cmd);
341992f8e767SSarah Sharp return -ENOMEM;
342092f8e767SSarah Sharp }
34218df75f42SSarah Sharp
34228df75f42SSarah Sharp /* Check to make sure all endpoints are not already configured for
34238df75f42SSarah Sharp * streams. While we're at it, find the maximum number of streams that
34248df75f42SSarah Sharp * all the endpoints will support and check for duplicate endpoints.
34258df75f42SSarah Sharp */
34268df75f42SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
34278df75f42SSarah Sharp ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
34288df75f42SSarah Sharp num_eps, &num_streams, &changed_ep_bitmask);
34298df75f42SSarah Sharp if (ret < 0) {
34308df75f42SSarah Sharp xhci_free_command(xhci, config_cmd);
34318df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
34328df75f42SSarah Sharp return ret;
34338df75f42SSarah Sharp }
34348df75f42SSarah Sharp if (num_streams <= 1) {
34358df75f42SSarah Sharp xhci_warn(xhci, "WARN: endpoints can't handle "
34368df75f42SSarah Sharp "more than one stream.\n");
34378df75f42SSarah Sharp xhci_free_command(xhci, config_cmd);
34388df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
34398df75f42SSarah Sharp return -EINVAL;
34408df75f42SSarah Sharp }
34418df75f42SSarah Sharp vdev = xhci->devs[udev->slot_id];
344225985edcSLucas De Marchi /* Mark each endpoint as being in transition, so
34438df75f42SSarah Sharp * xhci_urb_enqueue() will reject all URBs.
34448df75f42SSarah Sharp */
34458df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
34468df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
34478df75f42SSarah Sharp vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
34488df75f42SSarah Sharp }
34498df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
34508df75f42SSarah Sharp
34518df75f42SSarah Sharp /* Setup internal data structures and allocate HW data structures for
34528df75f42SSarah Sharp * streams (but don't install the HW structures in the input context
34538df75f42SSarah Sharp * until we're sure all memory allocation succeeded).
34548df75f42SSarah Sharp */
34558df75f42SSarah Sharp xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
34568df75f42SSarah Sharp xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
34578df75f42SSarah Sharp num_stream_ctxs, num_streams);
34588df75f42SSarah Sharp
34598df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
34608df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3461734d3dddSFelipe Balbi max_packet = usb_endpoint_maxp(&eps[i]->desc);
34628df75f42SSarah Sharp vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
34638df75f42SSarah Sharp num_stream_ctxs,
3464f9c589e1SMathias Nyman num_streams,
3465f9c589e1SMathias Nyman max_packet, mem_flags);
34668df75f42SSarah Sharp if (!vdev->eps[ep_index].stream_info)
34678df75f42SSarah Sharp goto cleanup;
34688df75f42SSarah Sharp /* Set maxPstreams in endpoint context and update deq ptr to
34698df75f42SSarah Sharp * point to stream context array. FIXME
34708df75f42SSarah Sharp */
34718df75f42SSarah Sharp }
34728df75f42SSarah Sharp
34738df75f42SSarah Sharp /* Set up the input context for a configure endpoint command. */
34748df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
34758df75f42SSarah Sharp struct xhci_ep_ctx *ep_ctx;
34768df75f42SSarah Sharp
34778df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
34788df75f42SSarah Sharp ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
34798df75f42SSarah Sharp
34808df75f42SSarah Sharp xhci_endpoint_copy(xhci, config_cmd->in_ctx,
34818df75f42SSarah Sharp vdev->out_ctx, ep_index);
34828df75f42SSarah Sharp xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
34838df75f42SSarah Sharp vdev->eps[ep_index].stream_info);
34848df75f42SSarah Sharp }
34858df75f42SSarah Sharp /* Tell the HW to drop its old copy of the endpoint context info
34868df75f42SSarah Sharp * and add the updated copy from the input context.
34878df75f42SSarah Sharp */
34888df75f42SSarah Sharp xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
348992f8e767SSarah Sharp vdev->out_ctx, ctrl_ctx,
349092f8e767SSarah Sharp changed_ep_bitmask, changed_ep_bitmask);
34918df75f42SSarah Sharp
34928df75f42SSarah Sharp /* Issue and wait for the configure endpoint command */
34938df75f42SSarah Sharp ret = xhci_configure_endpoint(xhci, udev, config_cmd,
34948df75f42SSarah Sharp false, false);
34958df75f42SSarah Sharp
34968df75f42SSarah Sharp /* xHC rejected the configure endpoint command for some reason, so we
34978df75f42SSarah Sharp * leave the old ring intact and free our internal streams data
34988df75f42SSarah Sharp * structure.
34998df75f42SSarah Sharp */
35008df75f42SSarah Sharp if (ret < 0)
35018df75f42SSarah Sharp goto cleanup;
35028df75f42SSarah Sharp
35038df75f42SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
35048df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
35058df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
35068df75f42SSarah Sharp vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
35078df75f42SSarah Sharp xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
35088df75f42SSarah Sharp udev->slot_id, ep_index);
35098df75f42SSarah Sharp vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
35108df75f42SSarah Sharp }
35118df75f42SSarah Sharp xhci_free_command(xhci, config_cmd);
35128df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
35138df75f42SSarah Sharp
3514712da5fcSMathias Nyman for (i = 0; i < num_eps; i++) {
3515712da5fcSMathias Nyman ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3516712da5fcSMathias Nyman xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3517712da5fcSMathias Nyman }
35188df75f42SSarah Sharp /* Subtract 1 for stream 0, which drivers can't use */
35198df75f42SSarah Sharp return num_streams - 1;
35208df75f42SSarah Sharp
35218df75f42SSarah Sharp cleanup:
35228df75f42SSarah Sharp /* If it didn't work, free the streams! */
35238df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
35248df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
35258df75f42SSarah Sharp xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
35268a007748SSarah Sharp vdev->eps[ep_index].stream_info = NULL;
35278df75f42SSarah Sharp /* FIXME Unset maxPstreams in endpoint context and
35288df75f42SSarah Sharp * update deq ptr to point to normal stream ring.
35298df75f42SSarah Sharp */
35308df75f42SSarah Sharp vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
35318df75f42SSarah Sharp vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
35328df75f42SSarah Sharp xhci_endpoint_zero(xhci, vdev, eps[i]);
35338df75f42SSarah Sharp }
35348df75f42SSarah Sharp xhci_free_command(xhci, config_cmd);
35358df75f42SSarah Sharp return -ENOMEM;
35368df75f42SSarah Sharp }
35378df75f42SSarah Sharp
35388df75f42SSarah Sharp /* Transition the endpoint from using streams to being a "normal" endpoint
35398df75f42SSarah Sharp * without streams.
35408df75f42SSarah Sharp *
35418df75f42SSarah Sharp * Modify the endpoint context state, submit a configure endpoint command,
35428df75f42SSarah Sharp * and free all endpoint rings for streams if that completes successfully.
35438df75f42SSarah Sharp */
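/*
 * Reached from the class-driver side via usb_free_streams(), the inverse
 * of the usb_alloc_streams() call sketched above.
 */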
35443969384cSLu Baolu static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
35458df75f42SSarah Sharp struct usb_host_endpoint **eps, unsigned int num_eps,
35468df75f42SSarah Sharp gfp_t mem_flags)
35478df75f42SSarah Sharp {
35488df75f42SSarah Sharp int i, ret;
35498df75f42SSarah Sharp struct xhci_hcd *xhci;
35508df75f42SSarah Sharp struct xhci_virt_device *vdev;
35518df75f42SSarah Sharp struct xhci_command *command;
355292f8e767SSarah Sharp struct xhci_input_control_ctx *ctrl_ctx;
35538df75f42SSarah Sharp unsigned int ep_index;
35548df75f42SSarah Sharp unsigned long flags;
35558df75f42SSarah Sharp u32 changed_ep_bitmask;
35568df75f42SSarah Sharp
35578df75f42SSarah Sharp xhci = hcd_to_xhci(hcd);
35588df75f42SSarah Sharp vdev = xhci->devs[udev->slot_id];
35598df75f42SSarah Sharp
35608df75f42SSarah Sharp /* Set up a configure endpoint command to remove the streams rings */
35618df75f42SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
35628df75f42SSarah Sharp changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
35638df75f42SSarah Sharp udev, eps, num_eps);
35648df75f42SSarah Sharp if (changed_ep_bitmask == 0) {
35658df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
35668df75f42SSarah Sharp return -EINVAL;
35678df75f42SSarah Sharp }
35688df75f42SSarah Sharp
35698df75f42SSarah Sharp /* Use the xhci_command structure from the first endpoint. We may have
35708df75f42SSarah Sharp * allocated too many, but the driver may call xhci_free_streams() for
35718df75f42SSarah Sharp * each endpoint it grouped into one call to xhci_alloc_streams().
35728df75f42SSarah Sharp */
35738df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[0]->desc);
35748df75f42SSarah Sharp command = vdev->eps[ep_index].stream_info->free_streams_command;
35754daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
357692f8e767SSarah Sharp if (!ctrl_ctx) {
35771f21569cSEmil Goode spin_unlock_irqrestore(&xhci->lock, flags);
357892f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
357992f8e767SSarah Sharp __func__);
358092f8e767SSarah Sharp return -EINVAL;
358192f8e767SSarah Sharp }
358292f8e767SSarah Sharp
35838df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
35848df75f42SSarah Sharp struct xhci_ep_ctx *ep_ctx;
35858df75f42SSarah Sharp
35868df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
35878df75f42SSarah Sharp ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
35888df75f42SSarah Sharp xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
35898df75f42SSarah Sharp EP_GETTING_NO_STREAMS;
35908df75f42SSarah Sharp
35918df75f42SSarah Sharp xhci_endpoint_copy(xhci, command->in_ctx,
35928df75f42SSarah Sharp vdev->out_ctx, ep_index);
35934daf9df5SLin Wang xhci_setup_no_streams_ep_input_ctx(ep_ctx,
35948df75f42SSarah Sharp &vdev->eps[ep_index]);
35958df75f42SSarah Sharp }
35968df75f42SSarah Sharp xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
359792f8e767SSarah Sharp vdev->out_ctx, ctrl_ctx,
359892f8e767SSarah Sharp changed_ep_bitmask, changed_ep_bitmask);
35998df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
36008df75f42SSarah Sharp
36018df75f42SSarah Sharp /* Issue and wait for the configure endpoint command,
36028df75f42SSarah Sharp * which must succeed.
36038df75f42SSarah Sharp */
36048df75f42SSarah Sharp ret = xhci_configure_endpoint(xhci, udev, command,
36058df75f42SSarah Sharp false, true);
36068df75f42SSarah Sharp
36078df75f42SSarah Sharp /* xHC rejected the configure endpoint command for some reason, so we
36088df75f42SSarah Sharp * leave the streams rings intact.
36098df75f42SSarah Sharp */
36108df75f42SSarah Sharp if (ret < 0)
36118df75f42SSarah Sharp return ret;
36128df75f42SSarah Sharp
36138df75f42SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
36148df75f42SSarah Sharp for (i = 0; i < num_eps; i++) {
36158df75f42SSarah Sharp ep_index = xhci_get_endpoint_index(&eps[i]->desc);
36168df75f42SSarah Sharp xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
36178a007748SSarah Sharp vdev->eps[ep_index].stream_info = NULL;
36188df75f42SSarah Sharp /* FIXME Unset maxPstreams in endpoint context and
36198df75f42SSarah Sharp * update deq ptr to point to normal stream ring.
36208df75f42SSarah Sharp */
36218df75f42SSarah Sharp vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
36228df75f42SSarah Sharp vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
36238df75f42SSarah Sharp }
36248df75f42SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
36258df75f42SSarah Sharp
36268df75f42SSarah Sharp return 0;
36278df75f42SSarah Sharp }
36288df75f42SSarah Sharp
3629bc75fa38SAlex Chiang /*
36302cf95c18SSarah Sharp * Deletes endpoint resources for endpoints that were active before a Reset
36312cf95c18SSarah Sharp * Device command, or a Disable Slot command. The Reset Device command leaves
36322cf95c18SSarah Sharp * the control endpoint intact, whereas the Disable Slot command deletes it.
36332cf95c18SSarah Sharp *
36342cf95c18SSarah Sharp * Must be called with xhci->lock held.
36352cf95c18SSarah Sharp */
36362cf95c18SSarah Sharp void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
36372cf95c18SSarah Sharp struct xhci_virt_device *virt_dev, bool drop_control_ep)
36382cf95c18SSarah Sharp {
36392cf95c18SSarah Sharp int i;
36402cf95c18SSarah Sharp unsigned int num_dropped_eps = 0;
36412cf95c18SSarah Sharp unsigned int drop_flags = 0;
36422cf95c18SSarah Sharp
36432cf95c18SSarah Sharp for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
36442cf95c18SSarah Sharp if (virt_dev->eps[i].ring) {
36452cf95c18SSarah Sharp drop_flags |= 1 << i;
36462cf95c18SSarah Sharp num_dropped_eps++;
36472cf95c18SSarah Sharp }
36482cf95c18SSarah Sharp }
36492cf95c18SSarah Sharp xhci->num_active_eps -= num_dropped_eps;
36502cf95c18SSarah Sharp if (num_dropped_eps)
36514bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
36524bdfe4c3SXenia Ragiadakou "Dropped %u ep ctxs, flags = 0x%x, "
36534bdfe4c3SXenia Ragiadakou "%u now active.",
36542cf95c18SSarah Sharp num_dropped_eps, drop_flags,
36552cf95c18SSarah Sharp xhci->num_active_eps);
36562cf95c18SSarah Sharp }
36572cf95c18SSarah Sharp
36584a2422f6SKuangyi Chiang static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
36594a2422f6SKuangyi Chiang
36602cf95c18SSarah Sharp /*
3661bc75fa38SAlex Chiang * This submits a Reset Device Command, which will set the device state to 0,
3662bc75fa38SAlex Chiang * set the device address to 0, and disable all the endpoints except the default
3663bc75fa38SAlex Chiang * control endpoint. The USB core should come back and call
3664bc75fa38SAlex Chiang * xhci_address_device(), and then re-set up the configuration. If this is
3665bc75fa38SAlex Chiang * called because of a usb_reset_and_verify_device(), then the old alternate
3666bc75fa38SAlex Chiang * settings will be re-installed through the normal bandwidth allocation
3667bc75fa38SAlex Chiang * functions.
3668bc75fa38SAlex Chiang *
3669bc75fa38SAlex Chiang * Wait for the Reset Device command to finish. Remove all structures
3670bc75fa38SAlex Chiang * associated with the endpoints that were disabled. Clear the input device
3671c5628a2aSMathias Nyman * structure? Reset the control endpoint 0 max packet size?
3672f0615c45SAndiry Xu *
3673f0615c45SAndiry Xu * If the virt_dev to be reset does not exist or does not match the udev,
3674f0615c45SAndiry Xu * it means the device is lost, possibly due to the xHC restore error and
3675f0615c45SAndiry Xu * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3676f0615c45SAndiry Xu * re-allocate the device.
3677bc75fa38SAlex Chiang */
36783969384cSLu Baolu static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
36793969384cSLu Baolu struct usb_device *udev)
3680bc75fa38SAlex Chiang {
3681bc75fa38SAlex Chiang int ret, i;
3682bc75fa38SAlex Chiang unsigned long flags;
3683bc75fa38SAlex Chiang struct xhci_hcd *xhci;
3684bc75fa38SAlex Chiang unsigned int slot_id;
3685bc75fa38SAlex Chiang struct xhci_virt_device *virt_dev;
3686bc75fa38SAlex Chiang struct xhci_command *reset_device_cmd;
3687001fd382SMaarten Lankhorst struct xhci_slot_ctx *slot_ctx;
36882e27980eSSarah Sharp int old_active_eps = 0;
3689bc75fa38SAlex Chiang
3690f0615c45SAndiry Xu ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3691bc75fa38SAlex Chiang if (ret <= 0)
3692bc75fa38SAlex Chiang return ret;
3693bc75fa38SAlex Chiang xhci = hcd_to_xhci(hcd);
3694bc75fa38SAlex Chiang slot_id = udev->slot_id;
3695bc75fa38SAlex Chiang virt_dev = xhci->devs[slot_id];
3696f0615c45SAndiry Xu if (!virt_dev) {
3697f0615c45SAndiry Xu xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3698f0615c45SAndiry Xu "not exist. Re-allocate the device\n", slot_id);
3699f0615c45SAndiry Xu ret = xhci_alloc_dev(hcd, udev);
3700f0615c45SAndiry Xu if (ret == 1)
3701f0615c45SAndiry Xu return 0;
3702f0615c45SAndiry Xu else
3703f0615c45SAndiry Xu return -EINVAL;
3704f0615c45SAndiry Xu }
3705f0615c45SAndiry Xu
3706326124a0SBrian Campbell if (virt_dev->tt_info)
3707326124a0SBrian Campbell old_active_eps = virt_dev->tt_info->active_eps;
3708326124a0SBrian Campbell
3709f0615c45SAndiry Xu if (virt_dev->udev != udev) {
3710f0615c45SAndiry Xu /* If the virt_dev and the udev do not match, this virt_dev
3711f0615c45SAndiry Xu * may belong to another udev.
3712f0615c45SAndiry Xu * Re-allocate the device.
3713f0615c45SAndiry Xu */
3714f0615c45SAndiry Xu xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3715f0615c45SAndiry Xu "not match the udev. Re-allocate the device\n",
3716f0615c45SAndiry Xu slot_id);
3717f0615c45SAndiry Xu ret = xhci_alloc_dev(hcd, udev);
3718f0615c45SAndiry Xu if (ret == 1)
3719f0615c45SAndiry Xu return 0;
3720f0615c45SAndiry Xu else
3721f0615c45SAndiry Xu return -EINVAL;
3722f0615c45SAndiry Xu }
3723bc75fa38SAlex Chiang
3724001fd382SMaarten Lankhorst /* If device is not setup, there is no point in resetting it */
3725001fd382SMaarten Lankhorst slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3726001fd382SMaarten Lankhorst if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3727001fd382SMaarten Lankhorst SLOT_STATE_DISABLED)
3728001fd382SMaarten Lankhorst return 0;
3729001fd382SMaarten Lankhorst
37304a2422f6SKuangyi Chiang if (xhci->quirks & XHCI_ETRON_HOST) {
37314a2422f6SKuangyi Chiang /*
37324a2422f6SKuangyi Chiang * Obtain a new device slot to inform the xHCI host that
37334a2422f6SKuangyi Chiang * the USB device has been reset.
37344a2422f6SKuangyi Chiang */
37354a2422f6SKuangyi Chiang ret = xhci_disable_slot(xhci, udev->slot_id);
37364a2422f6SKuangyi Chiang xhci_free_virt_device(xhci, udev->slot_id);
37374a2422f6SKuangyi Chiang if (!ret) {
37384a2422f6SKuangyi Chiang ret = xhci_alloc_dev(hcd, udev);
37394a2422f6SKuangyi Chiang if (ret == 1)
37404a2422f6SKuangyi Chiang ret = 0;
37414a2422f6SKuangyi Chiang else
37424a2422f6SKuangyi Chiang ret = -EINVAL;
37434a2422f6SKuangyi Chiang }
37444a2422f6SKuangyi Chiang return ret;
37454a2422f6SKuangyi Chiang }
37464a2422f6SKuangyi Chiang
374719a7d0d6SFelipe Balbi trace_xhci_discover_or_reset_device(slot_ctx);
374819a7d0d6SFelipe Balbi
3749bc75fa38SAlex Chiang xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3750bc75fa38SAlex Chiang /* Allocate the command structure that holds the struct completion.
3751bc75fa38SAlex Chiang * Assume we're in process context, since the normal device reset
3752bc75fa38SAlex Chiang * process has to wait for the device anyway. Storage devices are
3753bc75fa38SAlex Chiang * reset as part of error handling, so use GFP_NOIO instead of
3754bc75fa38SAlex Chiang * GFP_KERNEL.
3755bc75fa38SAlex Chiang */
3756103afda0SMathias Nyman reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3757bc75fa38SAlex Chiang if (!reset_device_cmd) {
3758bc75fa38SAlex Chiang xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3759bc75fa38SAlex Chiang return -ENOMEM;
3760bc75fa38SAlex Chiang }
3761bc75fa38SAlex Chiang
3762bc75fa38SAlex Chiang /* Attempt to submit the Reset Device command to the command ring */
3763bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
37647a3783efSPaul Zimmerman
3765ddba5cd0SMathias Nyman ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3766bc75fa38SAlex Chiang if (ret) {
3767bc75fa38SAlex Chiang xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3768bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
3769bc75fa38SAlex Chiang goto command_cleanup;
3770bc75fa38SAlex Chiang }
3771bc75fa38SAlex Chiang xhci_ring_cmd_db(xhci);
3772bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
3773bc75fa38SAlex Chiang
3774bc75fa38SAlex Chiang /* Wait for the Reset Device command to finish */
3775c311e391SMathias Nyman wait_for_completion(reset_device_cmd->completion);
3776bc75fa38SAlex Chiang
3777bc75fa38SAlex Chiang /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3778bc75fa38SAlex Chiang * unless we tried to reset a slot ID that wasn't enabled,
3779bc75fa38SAlex Chiang * or the device wasn't in the addressed or configured state.
3780bc75fa38SAlex Chiang */
3781bc75fa38SAlex Chiang ret = reset_device_cmd->status;
3782bc75fa38SAlex Chiang switch (ret) {
37830b7c105aSFelipe Balbi case COMP_COMMAND_ABORTED:
3784604d02a2SMathias Nyman case COMP_COMMAND_RING_STOPPED:
3785c311e391SMathias Nyman xhci_warn(xhci, "Timeout waiting for reset device command\n");
3786c311e391SMathias Nyman ret = -ETIME;
3787c311e391SMathias Nyman goto command_cleanup;
37880b7c105aSFelipe Balbi case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
37890b7c105aSFelipe Balbi case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
379038a532a6SXenia Ragiadakou xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3791bc75fa38SAlex Chiang slot_id,
3792bc75fa38SAlex Chiang xhci_get_slot_state(xhci, virt_dev->out_ctx));
379338a532a6SXenia Ragiadakou xhci_dbg(xhci, "Not freeing device rings.\n");
3794bc75fa38SAlex Chiang /* Don't treat this as an error. May change my mind later. */
3795bc75fa38SAlex Chiang ret = 0;
3796bc75fa38SAlex Chiang goto command_cleanup;
3797bc75fa38SAlex Chiang case COMP_SUCCESS:
3798bc75fa38SAlex Chiang xhci_dbg(xhci, "Successful reset device command.\n");
3799bc75fa38SAlex Chiang break;
3800bc75fa38SAlex Chiang default:
3801bc75fa38SAlex Chiang if (xhci_is_vendor_info_code(xhci, ret))
3802bc75fa38SAlex Chiang break;
3803bc75fa38SAlex Chiang xhci_warn(xhci, "Unknown completion code %u for "
3804bc75fa38SAlex Chiang "reset device command.\n", ret);
3805bc75fa38SAlex Chiang ret = -EINVAL;
3806bc75fa38SAlex Chiang goto command_cleanup;
3807bc75fa38SAlex Chiang }
3808bc75fa38SAlex Chiang
38092cf95c18SSarah Sharp /* Free up host controller endpoint resources */
38102cf95c18SSarah Sharp if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
38112cf95c18SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
38122cf95c18SSarah Sharp /* Don't delete the default control endpoint resources */
38132cf95c18SSarah Sharp xhci_free_device_endpoint_resources(xhci, virt_dev, false);
38142cf95c18SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
38152cf95c18SSarah Sharp }
38162cf95c18SSarah Sharp
3817c5628a2aSMathias Nyman /* Everything but endpoint 0 is disabled, so free the rings. */
381898871e94SFelipe Balbi for (i = 1; i < 31; i++) {
38192dea75d9SDmitry Torokhov struct xhci_virt_ep *ep = &virt_dev->eps[i];
38202dea75d9SDmitry Torokhov
38212dea75d9SDmitry Torokhov if (ep->ep_state & EP_HAS_STREAMS) {
3822df613834SHans de Goede xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3823df613834SHans de Goede xhci_get_endpoint_address(i));
38242dea75d9SDmitry Torokhov xhci_free_stream_info(xhci, ep->stream_info);
38252dea75d9SDmitry Torokhov ep->stream_info = NULL;
38262dea75d9SDmitry Torokhov ep->ep_state &= ~EP_HAS_STREAMS;
38272dea75d9SDmitry Torokhov }
38282dea75d9SDmitry Torokhov
38292dea75d9SDmitry Torokhov if (ep->ring) {
383002b6fdc2SLu Baolu xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3831c5628a2aSMathias Nyman xhci_free_endpoint_ring(xhci, virt_dev, i);
3832bc75fa38SAlex Chiang }
38332e27980eSSarah Sharp if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
38342e27980eSSarah Sharp xhci_drop_ep_from_interval_table(xhci,
38352e27980eSSarah Sharp &virt_dev->eps[i].bw_info,
38362e27980eSSarah Sharp virt_dev->bw_table,
38372e27980eSSarah Sharp udev,
38382e27980eSSarah Sharp &virt_dev->eps[i],
38392e27980eSSarah Sharp virt_dev->tt_info);
38409af5d71dSSarah Sharp xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
38412dea75d9SDmitry Torokhov }
38422e27980eSSarah Sharp /* If necessary, update the number of active TTs on this root port */
38432e27980eSSarah Sharp xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3844b8c3b718SMathias Nyman virt_dev->flags = 0;
3845bc75fa38SAlex Chiang ret = 0;
3846bc75fa38SAlex Chiang
3847bc75fa38SAlex Chiang command_cleanup:
3848bc75fa38SAlex Chiang xhci_free_command(xhci, reset_device_cmd);
3849bc75fa38SAlex Chiang return ret;
3850bc75fa38SAlex Chiang }
3851bc75fa38SAlex Chiang
3852bc75fa38SAlex Chiang /*
3853bc75fa38SAlex Chiang * At this point, the struct usb_device is about to go away, the device has
3854bc75fa38SAlex Chiang * disconnected, and all traffic has been stopped and the endpoints have been
3855bc75fa38SAlex Chiang * disabled. Free any HC data structures associated with that device.
3856bc75fa38SAlex Chiang */
38573969384cSLu Baolu static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3858bc75fa38SAlex Chiang {
3859bc75fa38SAlex Chiang struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3860bc75fa38SAlex Chiang struct xhci_virt_device *virt_dev;
386119a7d0d6SFelipe Balbi struct xhci_slot_ctx *slot_ctx;
3862a2bc47c4SMathias Nyman unsigned long flags;
386364927730SAndiry Xu int i, ret;
3864ddba5cd0SMathias Nyman
3865c8476fb8SShawn Nematbakhsh /*
3866c8476fb8SShawn Nematbakhsh * We called pm_runtime_get_noresume when the device was attached.
3867c8476fb8SShawn Nematbakhsh * Decrement the counter here to allow controller to runtime suspend
3868c8476fb8SShawn Nematbakhsh * if no devices remain.
3869c8476fb8SShawn Nematbakhsh */
3870c8476fb8SShawn Nematbakhsh if (xhci->quirks & XHCI_RESET_ON_RESUME)
3871e7ecf069SSarah Sharp pm_runtime_put_noidle(hcd->self.controller);
3872c8476fb8SShawn Nematbakhsh
387364927730SAndiry Xu ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
38747bd89b40SSarah Sharp /* If the host is halted due to driver unload, we still need to free the
38757bd89b40SSarah Sharp * device.
38767bd89b40SSarah Sharp */
3877cd3f1790SLu Baolu if (ret <= 0 && ret != -ENODEV)
3878bc75fa38SAlex Chiang return;
387964927730SAndiry Xu
3880bc75fa38SAlex Chiang virt_dev = xhci->devs[udev->slot_id];
388119a7d0d6SFelipe Balbi slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
388219a7d0d6SFelipe Balbi trace_xhci_free_dev(slot_ctx);
3883bc75fa38SAlex Chiang
3884bc75fa38SAlex Chiang /* Stop any wayward timer functions (which may grab the lock) */
388525355e04SMathias Nyman for (i = 0; i < 31; i++)
38869983a5fcSMathias Nyman virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
388744a182b9SMathias Nyman virt_dev->udev = NULL;
38887faac195SMathias Nyman xhci_disable_slot(xhci, udev->slot_id);
3889a2bc47c4SMathias Nyman
3890a2bc47c4SMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
389111ec7588SLu Baolu xhci_free_virt_device(xhci, udev->slot_id);
3892a2bc47c4SMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
3893a2bc47c4SMathias Nyman
3894f9e609b8SGuoqing Zhang }
3895f9e609b8SGuoqing Zhang
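/*
 * Issue a Disable Slot command and wait for it to complete. The slot is
 * returned to the xHC; callers are expected to free the corresponding
 * virt device themselves via xhci_free_virt_device().
 */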
3896cd3f1790SLu Baolu int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3897f9e609b8SGuoqing Zhang {
3898cd3f1790SLu Baolu struct xhci_command *command;
3899f9e609b8SGuoqing Zhang unsigned long flags;
3900f9e609b8SGuoqing Zhang u32 state;
390198d107b8SLinyu Yuan int ret;
3902f9e609b8SGuoqing Zhang
39037faac195SMathias Nyman command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3904f9e609b8SGuoqing Zhang if (!command)
3905f9e609b8SGuoqing Zhang return -ENOMEM;
3906f9e609b8SGuoqing Zhang
39079334367cSIkjoon Jang xhci_debugfs_remove_slot(xhci, slot_id);
39089334367cSIkjoon Jang
3909bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
3910bc75fa38SAlex Chiang /* Don't disable the slot if the host controller is dead. */
3911b0ba9720SXenia Ragiadakou state = readl(&xhci->op_regs->status);
39127bd89b40SSarah Sharp if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
39137bd89b40SSarah Sharp (xhci->xhc_state & XHCI_STATE_HALTED)) {
3914bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
3915ddba5cd0SMathias Nyman kfree(command);
3916dcabc76fSLu Baolu return -ENODEV;
3917bc75fa38SAlex Chiang }
3918bc75fa38SAlex Chiang
3919f9e609b8SGuoqing Zhang ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3920f9e609b8SGuoqing Zhang slot_id);
3921f9e609b8SGuoqing Zhang if (ret) {
3922bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
3923cd3f1790SLu Baolu kfree(command);
3924f9e609b8SGuoqing Zhang return ret;
3925bc75fa38SAlex Chiang }
3926bc75fa38SAlex Chiang xhci_ring_cmd_db(xhci);
3927bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
39287faac195SMathias Nyman
39297faac195SMathias Nyman wait_for_completion(command->completion);
39307faac195SMathias Nyman
39317faac195SMathias Nyman if (command->status != COMP_SUCCESS)
39327faac195SMathias Nyman xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
39337faac195SMathias Nyman slot_id, command->status);
39347faac195SMathias Nyman
39357faac195SMathias Nyman xhci_free_command(xhci, command);
39367faac195SMathias Nyman
393798d107b8SLinyu Yuan return 0;
3938bc75fa38SAlex Chiang }
3939bc75fa38SAlex Chiang
3940bc75fa38SAlex Chiang /*
39412cf95c18SSarah Sharp * Checks if we have enough host controller resources for the default control
39422cf95c18SSarah Sharp * endpoint.
39432cf95c18SSarah Sharp *
39442cf95c18SSarah Sharp * Must be called with xhci->lock held.
39452cf95c18SSarah Sharp */
39462cf95c18SSarah Sharp static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
39472cf95c18SSarah Sharp {
39482cf95c18SSarah Sharp if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
39494bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
39504bdfe4c3SXenia Ragiadakou "Not enough ep ctxs: "
39514bdfe4c3SXenia Ragiadakou "%u active, need to add 1, limit is %u.",
39522cf95c18SSarah Sharp xhci->num_active_eps, xhci->limit_active_eps);
39532cf95c18SSarah Sharp return -ENOMEM;
39542cf95c18SSarah Sharp }
39552cf95c18SSarah Sharp xhci->num_active_eps += 1;
39564bdfe4c3SXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
39574bdfe4c3SXenia Ragiadakou "Adding 1 ep ctx, %u now active.",
39582cf95c18SSarah Sharp xhci->num_active_eps);
39592cf95c18SSarah Sharp return 0;
39602cf95c18SSarah Sharp }
39612cf95c18SSarah Sharp
39622cf95c18SSarah Sharp
39632cf95c18SSarah Sharp /*
3964bc75fa38SAlex Chiang * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3965bc75fa38SAlex Chiang * timed out, or allocating memory failed. Returns 1 on success.
3966bc75fa38SAlex Chiang */
3967bc75fa38SAlex Chiang int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3968bc75fa38SAlex Chiang {
3969bc75fa38SAlex Chiang struct xhci_hcd *xhci = hcd_to_xhci(hcd);
397019a7d0d6SFelipe Balbi struct xhci_virt_device *vdev;
397119a7d0d6SFelipe Balbi struct xhci_slot_ctx *slot_ctx;
3972bc75fa38SAlex Chiang unsigned long flags;
3973a00918d0SChris Bainbridge int ret, slot_id;
3974ddba5cd0SMathias Nyman struct xhci_command *command;
3975ddba5cd0SMathias Nyman
3976103afda0SMathias Nyman command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3977ddba5cd0SMathias Nyman if (!command)
3978ddba5cd0SMathias Nyman return 0;
3979bc75fa38SAlex Chiang
3980bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
3981ddba5cd0SMathias Nyman ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3982bc75fa38SAlex Chiang if (ret) {
3983bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
3984bc75fa38SAlex Chiang xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
398587e44f2aSLu Baolu xhci_free_command(xhci, command);
3986bc75fa38SAlex Chiang return 0;
3987bc75fa38SAlex Chiang }
3988bc75fa38SAlex Chiang xhci_ring_cmd_db(xhci);
3989bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
3990bc75fa38SAlex Chiang
3991c311e391SMathias Nyman wait_for_completion(command->completion);
3992c2d3d49bSLu Baolu slot_id = command->slot_id;
3993bc75fa38SAlex Chiang
3994a00918d0SChris Bainbridge if (!slot_id || command->status != COMP_SUCCESS) {
3995e11487f1SMathias Nyman xhci_err(xhci, "Error while assigning device slot ID: %s\n",
3996e11487f1SMathias Nyman xhci_trb_comp_code_string(command->status));
3997be982038SSarah Sharp xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3998be982038SSarah Sharp HCS_MAX_SLOTS(
3999be982038SSarah Sharp readl(&xhci->cap_regs->hcs_params1)));
400087e44f2aSLu Baolu xhci_free_command(xhci, command);
4001bc75fa38SAlex Chiang return 0;
4002bc75fa38SAlex Chiang }
40032cf95c18SSarah Sharp
4004cd3f1790SLu Baolu xhci_free_command(xhci, command);
4005cd3f1790SLu Baolu
40062cf95c18SSarah Sharp if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
40072cf95c18SSarah Sharp spin_lock_irqsave(&xhci->lock, flags);
40082cf95c18SSarah Sharp ret = xhci_reserve_host_control_ep_resources(xhci);
40092cf95c18SSarah Sharp if (ret) {
40102cf95c18SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
40112cf95c18SSarah Sharp xhci_warn(xhci, "Not enough host resources, "
40122cf95c18SSarah Sharp "active endpoint contexts = %u\n",
40132cf95c18SSarah Sharp xhci->num_active_eps);
40142cf95c18SSarah Sharp goto disable_slot;
40152cf95c18SSarah Sharp }
40162cf95c18SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
40172cf95c18SSarah Sharp }
40182cf95c18SSarah Sharp /* Use GFP_NOIO, since this function can be called from
4019a6d940ddSSarah Sharp * xhci_discover_or_reset_device(), which may be called as part of
4020a6d940ddSSarah Sharp * mass storage driver error handling.
4021a6d940ddSSarah Sharp */
4022a00918d0SChris Bainbridge if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4023bc75fa38SAlex Chiang xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
40242cf95c18SSarah Sharp goto disable_slot;
4025bc75fa38SAlex Chiang }
402619a7d0d6SFelipe Balbi vdev = xhci->devs[slot_id];
402719a7d0d6SFelipe Balbi slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
402819a7d0d6SFelipe Balbi trace_xhci_alloc_dev(slot_ctx);
402919a7d0d6SFelipe Balbi
4030a00918d0SChris Bainbridge udev->slot_id = slot_id;
4031c8476fb8SShawn Nematbakhsh
403202b6fdc2SLu Baolu xhci_debugfs_create_slot(xhci, slot_id);
403302b6fdc2SLu Baolu
4034c8476fb8SShawn Nematbakhsh /*
4035c8476fb8SShawn Nematbakhsh * If resetting upon resume, we can't put the controller into runtime
4036c8476fb8SShawn Nematbakhsh * suspend if there is a device attached.
4037c8476fb8SShawn Nematbakhsh */
4038c8476fb8SShawn Nematbakhsh if (xhci->quirks & XHCI_RESET_ON_RESUME)
4039e7ecf069SSarah Sharp pm_runtime_get_noresume(hcd->self.controller);
4040c8476fb8SShawn Nematbakhsh
4041bc75fa38SAlex Chiang /* Is this a LS or FS device under a HS hub? */
4042bc75fa38SAlex Chiang /* Hub or peripheral? */
4043bc75fa38SAlex Chiang return 1;
40442cf95c18SSarah Sharp
40452cf95c18SSarah Sharp disable_slot:
40467faac195SMathias Nyman xhci_disable_slot(xhci, udev->slot_id);
404711ec7588SLu Baolu xhci_free_virt_device(xhci, udev->slot_id);
404811ec7588SLu Baolu
404911ec7588SLu Baolu return 0;
4050bc75fa38SAlex Chiang }
4051bc75fa38SAlex Chiang
405226cc5cb0SHardik Gajjar /**
405326cc5cb0SHardik Gajjar * xhci_setup_device - issues an Address Device command to assign a unique
405426cc5cb0SHardik Gajjar * USB bus address.
405526cc5cb0SHardik Gajjar * @hcd: USB host controller data structure.
405626cc5cb0SHardik Gajjar * @udev: USB dev structure representing the connected device.
405726cc5cb0SHardik Gajjar * @setup: Enum specifying setup mode: address only or with context.
405826cc5cb0SHardik Gajjar * @timeout_ms: Max wait time (ms) for the command operation to complete.
405926cc5cb0SHardik Gajjar *
406026cc5cb0SHardik Gajjar * Return: 0 if successful; otherwise, negative error code.
4061bc75fa38SAlex Chiang */
406248fc7dbdSDan Williams static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
406326cc5cb0SHardik Gajjar enum xhci_setup_dev setup, unsigned int timeout_ms)
4064bc75fa38SAlex Chiang {
40656f8ffc0bSDan Williams const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4066bc75fa38SAlex Chiang unsigned long flags;
4067bc75fa38SAlex Chiang struct xhci_virt_device *virt_dev;
4068bc75fa38SAlex Chiang int ret = 0;
4069bc75fa38SAlex Chiang struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4070bc75fa38SAlex Chiang struct xhci_slot_ctx *slot_ctx;
4071bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
4072bc75fa38SAlex Chiang u64 temp_64;
4073a00918d0SChris Bainbridge struct xhci_command *command = NULL;
4074a00918d0SChris Bainbridge
4075a00918d0SChris Bainbridge mutex_lock(&xhci->mutex);
4076bc75fa38SAlex Chiang
407790797aeeSLu Baolu if (xhci->xhc_state) { /* dying, removing or halted */
407890797aeeSLu Baolu ret = -ESHUTDOWN;
4079448116bfSRoger Quadros goto out;
408090797aeeSLu Baolu }
4081448116bfSRoger Quadros
4082bc75fa38SAlex Chiang if (!udev->slot_id) {
408384a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
408484a99f6fSXenia Ragiadakou "Bad Slot ID %d", udev->slot_id);
4085a00918d0SChris Bainbridge ret = -EINVAL;
4086a00918d0SChris Bainbridge goto out;
4087bc75fa38SAlex Chiang }
4088bc75fa38SAlex Chiang
4089bc75fa38SAlex Chiang virt_dev = xhci->devs[udev->slot_id];
4090bc75fa38SAlex Chiang
40917ed603ecSMatt Evans if (WARN_ON(!virt_dev)) {
40927ed603ecSMatt Evans /*
40937ed603ecSMatt Evans * In plug/unplug torture test with an NEC controller,
40947ed603ecSMatt Evans * a zero-dereference was observed once due to virt_dev = 0.
40957ed603ecSMatt Evans * Print useful debug rather than crash if it is observed again!
40967ed603ecSMatt Evans */
40977ed603ecSMatt Evans xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
40987ed603ecSMatt Evans udev->slot_id);
4099a00918d0SChris Bainbridge ret = -EINVAL;
4100a00918d0SChris Bainbridge goto out;
41017ed603ecSMatt Evans }
410219a7d0d6SFelipe Balbi slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
410319a7d0d6SFelipe Balbi trace_xhci_setup_device_slot(slot_ctx);
41047ed603ecSMatt Evans
4105f161ead7SMathias Nyman if (setup == SETUP_CONTEXT_ONLY) {
4106f161ead7SMathias Nyman if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4107f161ead7SMathias Nyman SLOT_STATE_DEFAULT) {
4108f161ead7SMathias Nyman xhci_dbg(xhci, "Slot already in default state\n");
4109a00918d0SChris Bainbridge goto out;
4110f161ead7SMathias Nyman }
4111f161ead7SMathias Nyman }
4112f161ead7SMathias Nyman
4113103afda0SMathias Nyman command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4114a00918d0SChris Bainbridge if (!command) {
4115a00918d0SChris Bainbridge ret = -ENOMEM;
4116a00918d0SChris Bainbridge goto out;
4117a00918d0SChris Bainbridge }
4118ddba5cd0SMathias Nyman
4119ddba5cd0SMathias Nyman command->in_ctx = virt_dev->in_ctx;
412026cc5cb0SHardik Gajjar command->timeout_ms = timeout_ms;
4121ddba5cd0SMathias Nyman
4122f0615c45SAndiry Xu slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
41234daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
412492f8e767SSarah Sharp if (!ctrl_ctx) {
412592f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
412692f8e767SSarah Sharp __func__);
4127a00918d0SChris Bainbridge ret = -EINVAL;
4128a00918d0SChris Bainbridge goto out;
412992f8e767SSarah Sharp }
4130f0615c45SAndiry Xu /*
4131f0615c45SAndiry Xu * If this is the first Set Address since device plug-in or
4132f0615c45SAndiry Xu * virt_device reallocation after a resume with an xHCI power loss,
4133f0615c45SAndiry Xu * then set up the slot context.
4134f0615c45SAndiry Xu */
4135f0615c45SAndiry Xu if (!slot_ctx->dev_info)
4136bc75fa38SAlex Chiang xhci_setup_addressable_virt_dev(xhci, udev);
4137f0615c45SAndiry Xu /* Otherwise, update the control endpoint ring enqueue pointer. */
41382d1ee590SSarah Sharp else
41392d1ee590SSarah Sharp xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4140d31c285bSSarah Sharp ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4141d31c285bSSarah Sharp ctrl_ctx->drop_flags = 0;
4142d31c285bSSarah Sharp
41431d27fabeSXenia Ragiadakou trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
41440c052aabSXenia Ragiadakou le32_to_cpu(slot_ctx->dev_info) >> 27);
4145bc75fa38SAlex Chiang
414690d6d573SMathias Nyman trace_xhci_address_ctrl_ctx(ctrl_ctx);
4147bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
4148a711edeeSFelipe Balbi trace_xhci_setup_device(virt_dev);
4149ddba5cd0SMathias Nyman ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
415048fc7dbdSDan Williams udev->slot_id, setup);
4151bc75fa38SAlex Chiang if (ret) {
4152bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
415384a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
415484a99f6fSXenia Ragiadakou "FIXME: allocate a command ring segment");
4155a00918d0SChris Bainbridge goto out;
4156bc75fa38SAlex Chiang }
4157bc75fa38SAlex Chiang xhci_ring_cmd_db(xhci);
4158bc75fa38SAlex Chiang spin_unlock_irqrestore(&xhci->lock, flags);
4159bc75fa38SAlex Chiang
4160bc75fa38SAlex Chiang /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4161c311e391SMathias Nyman wait_for_completion(command->completion);
4162c311e391SMathias Nyman
4163bc75fa38SAlex Chiang /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4164bc75fa38SAlex Chiang * the SetAddress() "recovery interval" required by USB and aborting the
4165bc75fa38SAlex Chiang * command on a timeout.
4166bc75fa38SAlex Chiang */
41679ea1833eSMathias Nyman switch (command->status) {
41680b7c105aSFelipe Balbi case COMP_COMMAND_ABORTED:
4169604d02a2SMathias Nyman case COMP_COMMAND_RING_STOPPED:
4170c311e391SMathias Nyman xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4171c311e391SMathias Nyman ret = -ETIME;
4172c311e391SMathias Nyman break;
41730b7c105aSFelipe Balbi case COMP_CONTEXT_STATE_ERROR:
41740b7c105aSFelipe Balbi case COMP_SLOT_NOT_ENABLED_ERROR:
41756f8ffc0bSDan Williams xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
41766f8ffc0bSDan Williams act, udev->slot_id);
4177bc75fa38SAlex Chiang ret = -EINVAL;
4178bc75fa38SAlex Chiang break;
41790b7c105aSFelipe Balbi case COMP_USB_TRANSACTION_ERROR:
41806f8ffc0bSDan Williams dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4181651aaf36SLu Baolu
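/*
 * The device failed to respond to the setup transaction: tear down the
 * slot and, if disabling succeeded, re-enable it below so the USB core
 * can retry addressing with a freshly initialized context.
 */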
4182651aaf36SLu Baolu mutex_unlock(&xhci->mutex);
4183651aaf36SLu Baolu ret = xhci_disable_slot(xhci, udev->slot_id);
41847faac195SMathias Nyman xhci_free_virt_device(xhci, udev->slot_id);
41856b99de30SMathias Nyman if (!ret) {
41866b99de30SMathias Nyman if (xhci_alloc_dev(hcd, udev) == 1)
41876b99de30SMathias Nyman xhci_setup_addressable_virt_dev(xhci, udev);
41886b99de30SMathias Nyman }
4189651aaf36SLu Baolu kfree(command->completion);
4190651aaf36SLu Baolu kfree(command);
4191651aaf36SLu Baolu return -EPROTO;
41920b7c105aSFelipe Balbi case COMP_INCOMPATIBLE_DEVICE_ERROR:
41936f8ffc0bSDan Williams dev_warn(&udev->dev,
41946f8ffc0bSDan Williams "ERROR: Incompatible device for setup %s command\n", act);
4195f6ba6fe2SAlex He ret = -ENODEV;
4196f6ba6fe2SAlex He break;
4197bc75fa38SAlex Chiang case COMP_SUCCESS:
419884a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
41996f8ffc0bSDan Williams "Successful setup %s command", act);
4200bc75fa38SAlex Chiang break;
4201bc75fa38SAlex Chiang default:
42026f8ffc0bSDan Williams xhci_err(xhci,
42036f8ffc0bSDan Williams "ERROR: unexpected setup %s command completion code 0x%x.\n",
42049ea1833eSMathias Nyman act, command->status);
42051d27fabeSXenia Ragiadakou trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4206bc75fa38SAlex Chiang ret = -EINVAL;
4207bc75fa38SAlex Chiang break;
4208bc75fa38SAlex Chiang }
4209a00918d0SChris Bainbridge if (ret)
4210a00918d0SChris Bainbridge goto out;
4211f7b2e403SSarah Sharp temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
421284a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
421384a99f6fSXenia Ragiadakou "Op regs DCBAA ptr = %#016llx", temp_64);
421484a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
421584a99f6fSXenia Ragiadakou "Slot ID %d dcbaa entry @%p = %#016llx",
4216bc75fa38SAlex Chiang udev->slot_id,
4217bc75fa38SAlex Chiang &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4218bc75fa38SAlex Chiang (unsigned long long)
421928ccd296SMatt Evans le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
422084a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
422184a99f6fSXenia Ragiadakou "Output Context DMA address = %#08llx",
4222bc75fa38SAlex Chiang (unsigned long long)virt_dev->out_ctx->dma);
42231d27fabeSXenia Ragiadakou trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
42240c052aabSXenia Ragiadakou le32_to_cpu(slot_ctx->dev_info) >> 27);
4225bc75fa38SAlex Chiang /*
4226bc75fa38SAlex Chiang * USB core uses address 1 for the roothubs, so we add one to the
4227bc75fa38SAlex Chiang * address given back to us by the HC.
4228bc75fa38SAlex Chiang */
42291d27fabeSXenia Ragiadakou trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
42300c052aabSXenia Ragiadakou le32_to_cpu(slot_ctx->dev_info) >> 27);
4231bc75fa38SAlex Chiang /* Zero the input context control for later use */
4232bc75fa38SAlex Chiang ctrl_ctx->add_flags = 0;
4233bc75fa38SAlex Chiang ctrl_ctx->drop_flags = 0;
42344998f1efSJim Lin slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
42354998f1efSJim Lin udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4236bc75fa38SAlex Chiang
423784a99f6fSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4238a2cdc343SDan Williams "Internal device address = %d",
4239a2cdc343SDan Williams le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4240a00918d0SChris Bainbridge out:
4241a00918d0SChris Bainbridge mutex_unlock(&xhci->mutex);
424287e44f2aSLu Baolu if (command) {
424387e44f2aSLu Baolu kfree(command->completion);
4244ddba5cd0SMathias Nyman kfree(command);
424587e44f2aSLu Baolu }
4246a00918d0SChris Bainbridge return ret;
4247bc75fa38SAlex Chiang }
4248bc75fa38SAlex Chiang
424926cc5cb0SHardik Gajjar static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
425026cc5cb0SHardik Gajjar unsigned int timeout_ms)
425148fc7dbdSDan Williams {
425226cc5cb0SHardik Gajjar return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
425348fc7dbdSDan Williams }
425448fc7dbdSDan Williams
42553969384cSLu Baolu static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
425648fc7dbdSDan Williams {
425726cc5cb0SHardik Gajjar return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
425826cc5cb0SHardik Gajjar XHCI_CMD_DEFAULT_TIMEOUT);
425948fc7dbdSDan Williams }
426048fc7dbdSDan Williams
42613f5eb141SLan Tianyu /*
42623f5eb141SLan Tianyu * Translate the port index into the real index in the HW port status
42633f5eb141SLan Tianyu * registers. Calculate the offset between the port's PORTSC register
42643f5eb141SLan Tianyu * and the port status base, then divide by the number of per-port
42653f5eb141SLan Tianyu * registers to get the real index. Raw port numbers are 1-based.
42663f5eb141SLan Tianyu */
42673f5eb141SLan Tianyu int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
42683f5eb141SLan Tianyu {
426938986ffaSMathias Nyman struct xhci_hub *rhub;
42703f5eb141SLan Tianyu
427138986ffaSMathias Nyman rhub = xhci_get_rhub(hcd);
427238986ffaSMathias Nyman return rhub->ports[port1 - 1]->hw_portnum + 1;
42733f5eb141SLan Tianyu }
42743f5eb141SLan Tianyu
4275a558ccdcSMathias Nyman /*
4276a558ccdcSMathias Nyman * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4277a558ccdcSMathias Nyman * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4278a558ccdcSMathias Nyman */
4279d5c82febSOlof Johansson static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4280a558ccdcSMathias Nyman struct usb_device *udev, u16 max_exit_latency)
4281a558ccdcSMathias Nyman {
4282a558ccdcSMathias Nyman struct xhci_virt_device *virt_dev;
4283a558ccdcSMathias Nyman struct xhci_command *command;
4284a558ccdcSMathias Nyman struct xhci_input_control_ctx *ctrl_ctx;
4285a558ccdcSMathias Nyman struct xhci_slot_ctx *slot_ctx;
4286a558ccdcSMathias Nyman unsigned long flags;
4287a558ccdcSMathias Nyman int ret;
4288a558ccdcSMathias Nyman
42895c2a380aSMathias Nyman command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
42905c2a380aSMathias Nyman if (!command)
42915c2a380aSMathias Nyman return -ENOMEM;
42925c2a380aSMathias Nyman
4293a558ccdcSMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
429496044694SMathias Nyman
429596044694SMathias Nyman virt_dev = xhci->devs[udev->slot_id];
429696044694SMathias Nyman
429796044694SMathias Nyman /*
429896044694SMathias Nyman * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
429996044694SMathias Nyman * xHC was re-initialized. Exit latency will be set later after
430096044694SMathias Nyman * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
430196044694SMathias Nyman */
430296044694SMathias Nyman
430396044694SMathias Nyman if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4304a558ccdcSMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
4305f6caea48SMathias Nyman xhci_free_command(xhci, command);
4306a558ccdcSMathias Nyman return 0;
4307a558ccdcSMathias Nyman }
4308a558ccdcSMathias Nyman
4309a558ccdcSMathias Nyman /* Attempt to issue an Evaluate Context command to change the MEL. */
43104daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
431192f8e767SSarah Sharp if (!ctrl_ctx) {
431292f8e767SSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
43135c2a380aSMathias Nyman xhci_free_command(xhci, command);
431492f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
431592f8e767SSarah Sharp __func__);
431692f8e767SSarah Sharp return -ENOMEM;
431792f8e767SSarah Sharp }
431892f8e767SSarah Sharp
4319a558ccdcSMathias Nyman xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4320a558ccdcSMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
4321a558ccdcSMathias Nyman
4322a558ccdcSMathias Nyman ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4323a558ccdcSMathias Nyman slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
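/* Replace the Max Exit Latency field of dev_info2 with the new value */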
4324a558ccdcSMathias Nyman slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4325a558ccdcSMathias Nyman slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
43264801d4eaSMathias Nyman slot_ctx->dev_state = 0;
4327a558ccdcSMathias Nyman
43283a7fa5beSXenia Ragiadakou xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
43293a7fa5beSXenia Ragiadakou "Set up evaluate context for LPM MEL change.");
4330a558ccdcSMathias Nyman
4331a558ccdcSMathias Nyman /* Issue and wait for the evaluate context command. */
4332a558ccdcSMathias Nyman ret = xhci_configure_endpoint(xhci, udev, command,
4333a558ccdcSMathias Nyman true, true);
4334a558ccdcSMathias Nyman
4335a558ccdcSMathias Nyman if (!ret) {
4336a558ccdcSMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
4337a558ccdcSMathias Nyman virt_dev->current_mel = max_exit_latency;
4338a558ccdcSMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
4339a558ccdcSMathias Nyman }
43405c2a380aSMathias Nyman
43415c2a380aSMathias Nyman xhci_free_command(xhci, command);
43425c2a380aSMathias Nyman
4343a558ccdcSMathias Nyman return ret;
4344a558ccdcSMathias Nyman }
4345a558ccdcSMathias Nyman
4346ceb6c9c8SRafael J. Wysocki #ifdef CONFIG_PM
43479574323cSAndiry Xu
43489574323cSAndiry Xu /* BESL to HIRD Encoding array for USB2 LPM */
43499574323cSAndiry Xu static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
43509574323cSAndiry Xu 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
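/* xhci_besl_encoding[N] is the exit latency, in microseconds, that BESL value N encodes */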
43519574323cSAndiry Xu
43529574323cSAndiry Xu /* Calculate HIRD/BESL for USB2 PORTPMSC */
4353f99298bfSAndiry Xu static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4354f99298bfSAndiry Xu struct usb_device *udev)
43559574323cSAndiry Xu {
4356f99298bfSAndiry Xu int u2del, besl, besl_host;
4357f99298bfSAndiry Xu int besl_device = 0;
4358f99298bfSAndiry Xu u32 field;
43599574323cSAndiry Xu
4360f99298bfSAndiry Xu u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4361f99298bfSAndiry Xu field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4362f99298bfSAndiry Xu
4363f99298bfSAndiry Xu if (field & USB_BESL_SUPPORT) {
4364f99298bfSAndiry Xu for (besl_host = 0; besl_host < 16; besl_host++) {
4365f99298bfSAndiry Xu if (xhci_besl_encoding[besl_host] >= u2del)
43669574323cSAndiry Xu break;
43679574323cSAndiry Xu }
4368f99298bfSAndiry Xu /* Use baseline BESL value as default */
4369f99298bfSAndiry Xu if (field & USB_BESL_BASELINE_VALID)
4370f99298bfSAndiry Xu besl_device = USB_GET_BESL_BASELINE(field);
4371f99298bfSAndiry Xu else if (field & USB_BESL_DEEP_VALID)
4372f99298bfSAndiry Xu besl_device = USB_GET_BESL_DEEP(field);
43739574323cSAndiry Xu } else {
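/*
 * No BESL support: derive HIRD from the U2 exit latency, one step
 * per 75us above the 50us floor (e.g. a u2del of 200us yields
 * besl_host 2).
 */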
43749574323cSAndiry Xu if (u2del <= 50)
4375f99298bfSAndiry Xu besl_host = 0;
43769574323cSAndiry Xu else
4377f99298bfSAndiry Xu besl_host = (u2del - 51) / 75 + 1;
43789574323cSAndiry Xu }
43799574323cSAndiry Xu
4380f99298bfSAndiry Xu besl = besl_host + besl_device;
4381f99298bfSAndiry Xu if (besl > 15)
4382f99298bfSAndiry Xu besl = 15;
4383f99298bfSAndiry Xu
4384f99298bfSAndiry Xu return besl;
43859574323cSAndiry Xu }
43869574323cSAndiry Xu
4387a558ccdcSMathias Nyman /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4388a558ccdcSMathias Nyman static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4389a558ccdcSMathias Nyman {
4390a558ccdcSMathias Nyman u32 field;
4391a558ccdcSMathias Nyman int l1;
4392a558ccdcSMathias Nyman int besld = 0;
4393a558ccdcSMathias Nyman int hirdm = 0;
4394a558ccdcSMathias Nyman
4395a558ccdcSMathias Nyman field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4396a558ccdcSMathias Nyman
4397a558ccdcSMathias Nyman /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
439817f34867SMathias Nyman l1 = udev->l1_params.timeout / 256;
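/* e.g. a timeout of 512us encodes as l1 = 2 */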
4399a558ccdcSMathias Nyman
4400a558ccdcSMathias Nyman /* device has preferred BESLD */
4401a558ccdcSMathias Nyman if (field & USB_BESL_DEEP_VALID) {
4402a558ccdcSMathias Nyman besld = USB_GET_BESL_DEEP(field);
4403a558ccdcSMathias Nyman hirdm = 1;
4404a558ccdcSMathias Nyman }
4405a558ccdcSMathias Nyman
4406a558ccdcSMathias Nyman return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4407a558ccdcSMathias Nyman }
4408a558ccdcSMathias Nyman
44093969384cSLu Baolu static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
441065580b43SAndiry Xu struct usb_device *udev, int enable)
441165580b43SAndiry Xu {
441265580b43SAndiry Xu struct xhci_hcd *xhci = hcd_to_xhci(hcd);
441338986ffaSMathias Nyman struct xhci_port **ports;
4414a558ccdcSMathias Nyman __le32 __iomem *pm_addr, *hlpm_addr;
4415a558ccdcSMathias Nyman u32 pm_val, hlpm_val, field;
441665580b43SAndiry Xu unsigned int port_num;
441765580b43SAndiry Xu unsigned long flags;
4418a558ccdcSMathias Nyman int hird, exit_latency;
4419a558ccdcSMathias Nyman int ret;
442065580b43SAndiry Xu
4421f0c472a6SKai-Heng Feng if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4422f0c472a6SKai-Heng Feng return -EPERM;
4423f0c472a6SKai-Heng Feng
4424b50107bbSMathias Nyman if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
442565580b43SAndiry Xu !udev->lpm_capable)
442665580b43SAndiry Xu return -EPERM;
442765580b43SAndiry Xu
442865580b43SAndiry Xu if (!udev->parent || udev->parent->parent ||
442965580b43SAndiry Xu udev->descriptor.bDeviceClass == USB_CLASS_HUB)
443065580b43SAndiry Xu return -EPERM;
443165580b43SAndiry Xu
443265580b43SAndiry Xu if (udev->usb2_hw_lpm_capable != 1)
443365580b43SAndiry Xu return -EPERM;
443465580b43SAndiry Xu
443565580b43SAndiry Xu spin_lock_irqsave(&xhci->lock, flags);
443665580b43SAndiry Xu
443738986ffaSMathias Nyman ports = xhci->usb2_rhub.ports;
443865580b43SAndiry Xu port_num = udev->portnum - 1;
443938986ffaSMathias Nyman pm_addr = ports[port_num]->addr + PORTPMSC;
4440b0ba9720SXenia Ragiadakou pm_val = readl(pm_addr);
444138986ffaSMathias Nyman hlpm_addr = ports[port_num]->addr + PORTHLPMC;
444265580b43SAndiry Xu
444365580b43SAndiry Xu xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4444654a55d3SLin Wang enable ? "enable" : "disable", port_num + 1);
444565580b43SAndiry Xu
4446f0c472a6SKai-Heng Feng if (enable) {
4447a558ccdcSMathias Nyman /* Host supports BESL timeout instead of HIRD */
4448a558ccdcSMathias Nyman if (udev->usb2_hw_lpm_besl_capable) {
4449a558ccdcSMathias Nyman /* if the device doesn't have a preferred BESL value, use a
4450a558ccdcSMathias Nyman * default one that works with mixed HIRD and BESL
4451a558ccdcSMathias Nyman * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4452a558ccdcSMathias Nyman */
44537aa1bb2fSCarsten Schmid field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4454a558ccdcSMathias Nyman if ((field & USB_BESL_SUPPORT) &&
4455a558ccdcSMathias Nyman (field & USB_BESL_BASELINE_VALID))
4456a558ccdcSMathias Nyman hird = USB_GET_BESL_BASELINE(field);
4457a558ccdcSMathias Nyman else
445817f34867SMathias Nyman hird = udev->l1_params.besl;
4459a558ccdcSMathias Nyman
4460a558ccdcSMathias Nyman exit_latency = xhci_besl_encoding[hird];
4461a558ccdcSMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
4462a558ccdcSMathias Nyman
4463a558ccdcSMathias Nyman ret = xhci_change_max_exit_latency(xhci, udev,
4464a558ccdcSMathias Nyman exit_latency);
4465a558ccdcSMathias Nyman if (ret < 0)
4466a558ccdcSMathias Nyman return ret;
4467a558ccdcSMathias Nyman spin_lock_irqsave(&xhci->lock, flags);
4468a558ccdcSMathias Nyman
4469a558ccdcSMathias Nyman hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4470204b7793SXenia Ragiadakou writel(hlpm_val, hlpm_addr);
4471a558ccdcSMathias Nyman /* flush write */
4472b0ba9720SXenia Ragiadakou readl(hlpm_addr);
447365580b43SAndiry Xu } else {
4474a558ccdcSMathias Nyman hird = xhci_calculate_hird_besl(xhci, udev);
4475a558ccdcSMathias Nyman }
4476a558ccdcSMathias Nyman
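/*
 * Program HIRD, RWE and the L1 device slot first; the L1 hardware
 * enable (HLE) bit is set with a separate write below.
 */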
4477a558ccdcSMathias Nyman pm_val &= ~PORT_HIRD_MASK;
447858e21f73SSarah Sharp pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4479204b7793SXenia Ragiadakou writel(pm_val, pm_addr);
4480b0ba9720SXenia Ragiadakou pm_val = readl(pm_addr);
4481a558ccdcSMathias Nyman pm_val |= PORT_HLE;
4482204b7793SXenia Ragiadakou writel(pm_val, pm_addr);
4483a558ccdcSMathias Nyman /* flush write */
4484b0ba9720SXenia Ragiadakou readl(pm_addr);
4485a558ccdcSMathias Nyman } else {
448658e21f73SSarah Sharp pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4487204b7793SXenia Ragiadakou writel(pm_val, pm_addr);
4488a558ccdcSMathias Nyman /* flush write */
4489b0ba9720SXenia Ragiadakou readl(pm_addr);
4490a558ccdcSMathias Nyman if (udev->usb2_hw_lpm_besl_capable) {
4491a558ccdcSMathias Nyman spin_unlock_irqrestore(&xhci->lock, flags);
4492a558ccdcSMathias Nyman xhci_change_max_exit_latency(xhci, udev, 0);
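/* Poll every 100us, for up to 10ms, for the port to settle back into U0 */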
4493b3d71abdSKai-Heng Feng readl_poll_timeout(ports[port_num]->addr, pm_val,
4494b3d71abdSKai-Heng Feng (pm_val & PORT_PLS_MASK) == XDEV_U0,
4495b3d71abdSKai-Heng Feng 100, 10000);
4496a558ccdcSMathias Nyman return 0;
4497a558ccdcSMathias Nyman }
449865580b43SAndiry Xu }
449965580b43SAndiry Xu
450065580b43SAndiry Xu spin_unlock_irqrestore(&xhci->lock, flags);
450165580b43SAndiry Xu return 0;
450265580b43SAndiry Xu }
450365580b43SAndiry Xu
4504b630d4b9SMathias Nyman /* Check if a USB2 port supports a given extended capability protocol.
4505b630d4b9SMathias Nyman * Only USB2 ports' extended protocol capability values are cached.
4506b630d4b9SMathias Nyman * Return 1 if the capability is supported.
4507b630d4b9SMathias Nyman */
4508b630d4b9SMathias Nyman static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4509b630d4b9SMathias Nyman unsigned capability)
4510b630d4b9SMathias Nyman {
4511b630d4b9SMathias Nyman u32 port_offset, port_count;
4512b630d4b9SMathias Nyman int i;
4513b630d4b9SMathias Nyman
4514b630d4b9SMathias Nyman for (i = 0; i < xhci->num_ext_caps; i++) {
4515b630d4b9SMathias Nyman if (xhci->ext_caps[i] & capability) {
4516b630d4b9SMathias Nyman /* port offsets start at 1 */
4517b630d4b9SMathias Nyman port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4518b630d4b9SMathias Nyman port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4519b630d4b9SMathias Nyman if (port >= port_offset &&
4520b630d4b9SMathias Nyman port < port_offset + port_count)
4521b630d4b9SMathias Nyman return 1;
4522b630d4b9SMathias Nyman }
4523b630d4b9SMathias Nyman }
4524b630d4b9SMathias Nyman return 0;
4525b630d4b9SMathias Nyman }
4526b630d4b9SMathias Nyman
45273969384cSLu Baolu static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4528b01bcbf7SSarah Sharp {
4529b01bcbf7SSarah Sharp struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4530b630d4b9SMathias Nyman int portnum = udev->portnum - 1;
4531b01bcbf7SSarah Sharp
4532f1fd62a6SZeng Tao if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4533de68bab4SSarah Sharp return 0;
4534de68bab4SSarah Sharp
4535de68bab4SSarah Sharp /* so far we only support LPM for non-hub devices connected to the root hub */
4536de68bab4SSarah Sharp if (!udev->parent || udev->parent->parent ||
4537de68bab4SSarah Sharp udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4538de68bab4SSarah Sharp return 0;
4539de68bab4SSarah Sharp
4540b630d4b9SMathias Nyman if (xhci->hw_lpm_support == 1 &&
4541de68bab4SSarah Sharp xhci_check_usb2_port_capability(
4542de68bab4SSarah Sharp xhci, portnum, XHCI_HLC)) {
4543b01bcbf7SSarah Sharp udev->usb2_hw_lpm_capable = 1;
454417f34867SMathias Nyman udev->l1_params.timeout = XHCI_L1_TIMEOUT;
454517f34867SMathias Nyman udev->l1_params.besl = XHCI_DEFAULT_BESL;
4546a558ccdcSMathias Nyman if (xhci_check_usb2_port_capability(xhci, portnum,
4547a558ccdcSMathias Nyman XHCI_BLC))
4548a558ccdcSMathias Nyman udev->usb2_hw_lpm_besl_capable = 1;
4549b01bcbf7SSarah Sharp }
4550b01bcbf7SSarah Sharp
4551b01bcbf7SSarah Sharp return 0;
4552b01bcbf7SSarah Sharp }
4553b01bcbf7SSarah Sharp
45543b3db026SSarah Sharp /*---------------------- USB 3.0 Link PM functions ------------------------*/
45553b3db026SSarah Sharp
4556e3567d2cSSarah Sharp /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
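/* e.g. a bInterval of 4 gives (1 << 3) * 125us = 1ms */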
4557e3567d2cSSarah Sharp static unsigned long long xhci_service_interval_to_ns(
4558e3567d2cSSarah Sharp struct usb_endpoint_descriptor *desc)
4559e3567d2cSSarah Sharp {
456016b45fdfSOliver Neukum return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4561e3567d2cSSarah Sharp }
4562e3567d2cSSarah Sharp
45633b3db026SSarah Sharp static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
45643b3db026SSarah Sharp enum usb3_link_state state)
45653b3db026SSarah Sharp {
45663b3db026SSarah Sharp unsigned long long sel;
45673b3db026SSarah Sharp unsigned long long pel;
45683b3db026SSarah Sharp unsigned int max_sel_pel;
45693b3db026SSarah Sharp char *state_name;
45703b3db026SSarah Sharp
45713b3db026SSarah Sharp switch (state) {
45723b3db026SSarah Sharp case USB3_LPM_U1:
45733b3db026SSarah Sharp /* Convert SEL and PEL stored in nanoseconds to microseconds */
45743b3db026SSarah Sharp sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
45753b3db026SSarah Sharp pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
45763b3db026SSarah Sharp max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
45773b3db026SSarah Sharp state_name = "U1";
45783b3db026SSarah Sharp break;
45793b3db026SSarah Sharp case USB3_LPM_U2:
45803b3db026SSarah Sharp sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
45813b3db026SSarah Sharp pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
45823b3db026SSarah Sharp max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
45833b3db026SSarah Sharp state_name = "U2";
45843b3db026SSarah Sharp break;
45853b3db026SSarah Sharp default:
45863b3db026SSarah Sharp dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
45873b3db026SSarah Sharp __func__);
4588e25e62aeSSarah Sharp return USB3_LPM_DISABLED;
45893b3db026SSarah Sharp }
45903b3db026SSarah Sharp
45913b3db026SSarah Sharp if (sel <= max_sel_pel && pel <= max_sel_pel)
45923b3db026SSarah Sharp return USB3_LPM_DEVICE_INITIATED;
45933b3db026SSarah Sharp
45943b3db026SSarah Sharp if (sel > max_sel_pel)
45953b3db026SSarah Sharp dev_dbg(&udev->dev, "Device-initiated %s disabled "
45963b3db026SSarah Sharp "due to long SEL %llu us\n",
45973b3db026SSarah Sharp state_name, sel);
45983b3db026SSarah Sharp else
45993b3db026SSarah Sharp dev_dbg(&udev->dev, "Device-initiated %s disabled "
460003e64e96SJoe Perches "due to long PEL %llu us\n",
46013b3db026SSarah Sharp state_name, pel);
46023b3db026SSarah Sharp return USB3_LPM_DISABLED;
46033b3db026SSarah Sharp }
46043b3db026SSarah Sharp
46059502c46cSPratyush Anand /* The U1 timeout should be the maximum of the following values:
4606e3567d2cSSarah Sharp * - For control endpoints, U1 system exit latency (SEL) * 3
4607e3567d2cSSarah Sharp * - For bulk endpoints, U1 SEL * 5
4608e3567d2cSSarah Sharp * - For interrupt endpoints:
4609e3567d2cSSarah Sharp * - Notification EPs, U1 SEL * 3
4610e3567d2cSSarah Sharp * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4611e3567d2cSSarah Sharp * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4612e3567d2cSSarah Sharp */
46139502c46cSPratyush Anand static unsigned long long xhci_calculate_intel_u1_timeout(
46149502c46cSPratyush Anand struct usb_device *udev,
4615e3567d2cSSarah Sharp struct usb_endpoint_descriptor *desc)
4616e3567d2cSSarah Sharp {
4617e3567d2cSSarah Sharp unsigned long long timeout_ns;
4618e3567d2cSSarah Sharp int ep_type;
4619e3567d2cSSarah Sharp int intr_type;
4620e3567d2cSSarah Sharp
4621e3567d2cSSarah Sharp ep_type = usb_endpoint_type(desc);
4622e3567d2cSSarah Sharp switch (ep_type) {
4623e3567d2cSSarah Sharp case USB_ENDPOINT_XFER_CONTROL:
4624e3567d2cSSarah Sharp timeout_ns = udev->u1_params.sel * 3;
4625e3567d2cSSarah Sharp break;
4626e3567d2cSSarah Sharp case USB_ENDPOINT_XFER_BULK:
4627e3567d2cSSarah Sharp timeout_ns = udev->u1_params.sel * 5;
4628e3567d2cSSarah Sharp break;
4629e3567d2cSSarah Sharp case USB_ENDPOINT_XFER_INT:
4630e3567d2cSSarah Sharp intr_type = usb_endpoint_interrupt_type(desc);
4631e3567d2cSSarah Sharp if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4632e3567d2cSSarah Sharp timeout_ns = udev->u1_params.sel * 3;
4633e3567d2cSSarah Sharp break;
4634e3567d2cSSarah Sharp }
4635e3567d2cSSarah Sharp /* Otherwise the calculation is the same as isoc eps */
4636df561f66SGustavo A. R. Silva fallthrough;
4637e3567d2cSSarah Sharp case USB_ENDPOINT_XFER_ISOC:
4638e3567d2cSSarah Sharp timeout_ns = xhci_service_interval_to_ns(desc);
4639c88db160SSarah Sharp timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4640e3567d2cSSarah Sharp if (timeout_ns < udev->u1_params.sel * 2)
4641e3567d2cSSarah Sharp timeout_ns = udev->u1_params.sel * 2;
4642e3567d2cSSarah Sharp break;
4643e3567d2cSSarah Sharp default:
4644e3567d2cSSarah Sharp return 0;
4645e3567d2cSSarah Sharp }
4646e3567d2cSSarah Sharp
46479502c46cSPratyush Anand return timeout_ns;
46489502c46cSPratyush Anand }
46499502c46cSPratyush Anand
46509502c46cSPratyush Anand /* Returns the hub-encoded U1 timeout value. */
46519502c46cSPratyush Anand static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
46529502c46cSPratyush Anand struct usb_device *udev,
46539502c46cSPratyush Anand struct usb_endpoint_descriptor *desc)
46549502c46cSPratyush Anand {
46559502c46cSPratyush Anand unsigned long long timeout_ns;
46569502c46cSPratyush Anand
46575d5323a6SMichael Grzeschik /* Prevent U1 if service interval is shorter than U1 exit latency */
46585d5323a6SMichael Grzeschik if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
46592847c46cSMathias Nyman if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
46605d5323a6SMichael Grzeschik dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
46615d5323a6SMichael Grzeschik return USB3_LPM_DISABLED;
46625d5323a6SMichael Grzeschik }
46635d5323a6SMichael Grzeschik }
46645d5323a6SMichael Grzeschik
4665d5e234ffSWeitao Wang if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
46662847c46cSMathias Nyman timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
46672847c46cSMathias Nyman else
46682847c46cSMathias Nyman timeout_ns = udev->u1_params.sel;
46692847c46cSMathias Nyman
46709502c46cSPratyush Anand /* The U1 timeout is encoded in 1us intervals.
46719502c46cSPratyush Anand * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
46729502c46cSPratyush Anand */
4673e3567d2cSSarah Sharp if (timeout_ns == USB3_LPM_DISABLED)
46749502c46cSPratyush Anand timeout_ns = 1;
46759502c46cSPratyush Anand else
46769502c46cSPratyush Anand timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
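/* e.g. a 4500ns timeout rounds up to a hub-encoded value of 5 (1us units) */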
4677e3567d2cSSarah Sharp
4678e3567d2cSSarah Sharp /* If the necessary timeout value is bigger than what we can set in the
4679e3567d2cSSarah Sharp * USB 3.0 hub, we have to disable hub-initiated U1.
4680e3567d2cSSarah Sharp */
4681e3567d2cSSarah Sharp if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4682e3567d2cSSarah Sharp return timeout_ns;
4683e3567d2cSSarah Sharp dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4684e3567d2cSSarah Sharp "due to long timeout %llu us\n", timeout_ns);
4685e3567d2cSSarah Sharp return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4686e3567d2cSSarah Sharp }
4687e3567d2cSSarah Sharp
46889502c46cSPratyush Anand /* The U2 timeout should be the maximum of:
4689e3567d2cSSarah Sharp * - 10 ms (to avoid the bandwidth impact on the scheduler)
4690e3567d2cSSarah Sharp * - largest bInterval of any active periodic endpoint (to avoid going
4691e3567d2cSSarah Sharp * into lower power link states between intervals).
4692e3567d2cSSarah Sharp * - the U2 Exit Latency of the device
4693e3567d2cSSarah Sharp */
46949502c46cSPratyush Anand static unsigned long long xhci_calculate_intel_u2_timeout(
46959502c46cSPratyush Anand struct usb_device *udev,
4696e3567d2cSSarah Sharp struct usb_endpoint_descriptor *desc)
4697e3567d2cSSarah Sharp {
4698e3567d2cSSarah Sharp unsigned long long timeout_ns;
4699e3567d2cSSarah Sharp unsigned long long u2_del_ns;
4700e3567d2cSSarah Sharp
4701e3567d2cSSarah Sharp timeout_ns = 10 * 1000 * 1000;
4702e3567d2cSSarah Sharp
4703e3567d2cSSarah Sharp if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4704e3567d2cSSarah Sharp (xhci_service_interval_to_ns(desc) > timeout_ns))
4705e3567d2cSSarah Sharp timeout_ns = xhci_service_interval_to_ns(desc);
4706e3567d2cSSarah Sharp
4707966e7a85SOliver Neukum u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4708e3567d2cSSarah Sharp if (u2_del_ns > timeout_ns)
4709e3567d2cSSarah Sharp timeout_ns = u2_del_ns;
4710e3567d2cSSarah Sharp
47119502c46cSPratyush Anand return timeout_ns;
47129502c46cSPratyush Anand }
47139502c46cSPratyush Anand
47149502c46cSPratyush Anand /* Returns the hub-encoded U2 timeout value. */
47159502c46cSPratyush Anand static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
47169502c46cSPratyush Anand struct usb_device *udev,
47179502c46cSPratyush Anand struct usb_endpoint_descriptor *desc)
47189502c46cSPratyush Anand {
47199502c46cSPratyush Anand unsigned long long timeout_ns;
47209502c46cSPratyush Anand
47215d5323a6SMichael Grzeschik /* Prevent U2 if service interval is shorter than U2 exit latency */
47225d5323a6SMichael Grzeschik if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
47232847c46cSMathias Nyman if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
47245d5323a6SMichael Grzeschik dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
47255d5323a6SMichael Grzeschik return USB3_LPM_DISABLED;
47265d5323a6SMichael Grzeschik }
47275d5323a6SMichael Grzeschik }
47285d5323a6SMichael Grzeschik
4729d5e234ffSWeitao Wang if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
47302847c46cSMathias Nyman timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
47312847c46cSMathias Nyman else
47322847c46cSMathias Nyman timeout_ns = udev->u2_params.sel;
47332847c46cSMathias Nyman
4734e3567d2cSSarah Sharp /* The U2 timeout is encoded in 256us intervals */
4735c88db160SSarah Sharp timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
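/* e.g. a 10ms timeout encodes as DIV_ROUND_UP(10000000, 256000) = 40 */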
4736e3567d2cSSarah Sharp /* If the necessary timeout value is bigger than what we can set in the
4737e3567d2cSSarah Sharp * USB 3.0 hub, we have to disable hub-initiated U2.
4738e3567d2cSSarah Sharp */
4739e3567d2cSSarah Sharp if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4740e3567d2cSSarah Sharp return timeout_ns;
4741e3567d2cSSarah Sharp dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4742e3567d2cSSarah Sharp "due to long timeout %llu (units of 256us)\n", timeout_ns);
4743e3567d2cSSarah Sharp return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4744e3567d2cSSarah Sharp }
4745e3567d2cSSarah Sharp
47463b3db026SSarah Sharp static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
47473b3db026SSarah Sharp struct usb_device *udev,
47483b3db026SSarah Sharp struct usb_endpoint_descriptor *desc,
47493b3db026SSarah Sharp enum usb3_link_state state,
47503b3db026SSarah Sharp u16 *timeout)
47513b3db026SSarah Sharp {
47529502c46cSPratyush Anand if (state == USB3_LPM_U1)
47539502c46cSPratyush Anand return xhci_calculate_u1_timeout(xhci, udev, desc);
47549502c46cSPratyush Anand else if (state == USB3_LPM_U2)
47559502c46cSPratyush Anand return xhci_calculate_u2_timeout(xhci, udev, desc);
4756e3567d2cSSarah Sharp
47573b3db026SSarah Sharp return USB3_LPM_DISABLED;
47583b3db026SSarah Sharp }
47593b3db026SSarah Sharp
47603b3db026SSarah Sharp static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
47613b3db026SSarah Sharp struct usb_device *udev,
47623b3db026SSarah Sharp struct usb_endpoint_descriptor *desc,
47633b3db026SSarah Sharp enum usb3_link_state state,
47643b3db026SSarah Sharp u16 *timeout)
47653b3db026SSarah Sharp {
47663b3db026SSarah Sharp u16 alt_timeout;
47673b3db026SSarah Sharp
47683b3db026SSarah Sharp alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
47693b3db026SSarah Sharp desc, state, timeout);
47703b3db026SSarah Sharp
4771d500c63fSJan Schmidt /* If we found we can't enable hub-initiated LPM, and
47723b3db026SSarah Sharp * the U1 or U2 exit latency was too high to allow
4773d500c63fSJan Schmidt * device-initiated LPM as well, then we will disable LPM
4774d500c63fSJan Schmidt * for this device, so stop searching any further.
47753b3db026SSarah Sharp */
4776d500c63fSJan Schmidt if (alt_timeout == USB3_LPM_DISABLED) {
47773b3db026SSarah Sharp *timeout = alt_timeout;
47783b3db026SSarah Sharp return -E2BIG;
47793b3db026SSarah Sharp }
47803b3db026SSarah Sharp if (alt_timeout > *timeout)
47813b3db026SSarah Sharp *timeout = alt_timeout;
47823b3db026SSarah Sharp return 0;
47833b3db026SSarah Sharp }
47843b3db026SSarah Sharp
47853b3db026SSarah Sharp static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
47863b3db026SSarah Sharp struct usb_device *udev,
47873b3db026SSarah Sharp struct usb_host_interface *alt,
47883b3db026SSarah Sharp enum usb3_link_state state,
47893b3db026SSarah Sharp u16 *timeout)
47903b3db026SSarah Sharp {
47913b3db026SSarah Sharp int j;
47923b3db026SSarah Sharp
47933b3db026SSarah Sharp for (j = 0; j < alt->desc.bNumEndpoints; j++) {
47943b3db026SSarah Sharp if (xhci_update_timeout_for_endpoint(xhci, udev,
47953b3db026SSarah Sharp &alt->endpoint[j].desc, state, timeout))
47963b3db026SSarah Sharp return -E2BIG;
47973b3db026SSarah Sharp }
47983b3db026SSarah Sharp return 0;
47993b3db026SSarah Sharp }
48003b3db026SSarah Sharp
48013b3db026SSarah Sharp static int xhci_check_tier_policy(struct xhci_hcd *xhci,
48023b3db026SSarah Sharp struct usb_device *udev,
48033b3db026SSarah Sharp enum usb3_link_state state)
48043b3db026SSarah Sharp {
4805d5e234ffSWeitao Wang struct usb_device *parent = udev->parent;
4806d5e234ffSWeitao Wang int tier = 1; /* roothub is tier1 */
4807d5e234ffSWeitao Wang
4808d5e234ffSWeitao Wang while (parent) {
4809d5e234ffSWeitao Wang parent = parent->parent;
4810d5e234ffSWeitao Wang tier++;
4811d5e234ffSWeitao Wang }
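/* e.g. a device behind one external hub is tier 3: roothub (1) -> hub (2) -> device (3) */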
4812d5e234ffSWeitao Wang
4813d5e234ffSWeitao Wang if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
4814d5e234ffSWeitao Wang goto fail;
4815d5e234ffSWeitao Wang if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
4816d5e234ffSWeitao Wang goto fail;
4817d5e234ffSWeitao Wang
48189502c46cSPratyush Anand return 0;
4819d5e234ffSWeitao Wang fail:
4820d5e234ffSWeitao Wang dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
4821d5e234ffSWeitao Wang tier);
4822d5e234ffSWeitao Wang return -E2BIG;
48233b3db026SSarah Sharp }
48243b3db026SSarah Sharp
48253b3db026SSarah Sharp /* Returns the U1 or U2 timeout that should be enabled.
48263b3db026SSarah Sharp * If the tier check or timeout setting functions return with a non-zero exit
48273b3db026SSarah Sharp * code, that means the timeout value has been finalized and we shouldn't look
48283b3db026SSarah Sharp * at any more endpoints.
48293b3db026SSarah Sharp */
48303b3db026SSarah Sharp static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
48313b3db026SSarah Sharp struct usb_device *udev, enum usb3_link_state state)
48323b3db026SSarah Sharp {
48333b3db026SSarah Sharp struct xhci_hcd *xhci = hcd_to_xhci(hcd);
48343b3db026SSarah Sharp struct usb_host_config *config;
48353b3db026SSarah Sharp char *state_name;
48363b3db026SSarah Sharp int i;
48373b3db026SSarah Sharp u16 timeout = USB3_LPM_DISABLED;
48383b3db026SSarah Sharp
48393b3db026SSarah Sharp if (state == USB3_LPM_U1)
48403b3db026SSarah Sharp state_name = "U1";
48413b3db026SSarah Sharp else if (state == USB3_LPM_U2)
48423b3db026SSarah Sharp state_name = "U2";
48433b3db026SSarah Sharp else {
48443b3db026SSarah Sharp dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
48453b3db026SSarah Sharp state);
48463b3db026SSarah Sharp return timeout;
48473b3db026SSarah Sharp }
48483b3db026SSarah Sharp
48493b3db026SSarah Sharp /* Gather some information about the currently installed configuration
48503b3db026SSarah Sharp * and alternate interface settings.
48513b3db026SSarah Sharp */
48523b3db026SSarah Sharp if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
48533b3db026SSarah Sharp state, &timeout))
48543b3db026SSarah Sharp return timeout;
48553b3db026SSarah Sharp
48563b3db026SSarah Sharp config = udev->actconfig;
48573b3db026SSarah Sharp if (!config)
48583b3db026SSarah Sharp return timeout;
48593b3db026SSarah Sharp
486064ba419bSXenia Ragiadakou for (i = 0; i < config->desc.bNumInterfaces; i++) {
48613b3db026SSarah Sharp struct usb_driver *driver;
48623b3db026SSarah Sharp struct usb_interface *intf = config->interface[i];
48633b3db026SSarah Sharp
48643b3db026SSarah Sharp if (!intf)
48653b3db026SSarah Sharp continue;
48663b3db026SSarah Sharp
48673b3db026SSarah Sharp /* Check if any currently bound drivers want hub-initiated LPM
48683b3db026SSarah Sharp * disabled.
48693b3db026SSarah Sharp */
48703b3db026SSarah Sharp if (intf->dev.driver) {
48713b3db026SSarah Sharp driver = to_usb_driver(intf->dev.driver);
48723b3db026SSarah Sharp if (driver && driver->disable_hub_initiated_lpm) {
4873cd9d9491SMathias Nyman dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
48743b3db026SSarah Sharp state_name, driver->name);
4875cd9d9491SMathias Nyman timeout = xhci_get_timeout_no_hub_lpm(udev,
4876cd9d9491SMathias Nyman state);
4877cd9d9491SMathias Nyman if (timeout == USB3_LPM_DISABLED)
4878cd9d9491SMathias Nyman return timeout;
48793b3db026SSarah Sharp }
48803b3db026SSarah Sharp }
48813b3db026SSarah Sharp
48823b3db026SSarah Sharp /* Not sure how this could happen... */
48833b3db026SSarah Sharp if (!intf->cur_altsetting)
48843b3db026SSarah Sharp continue;
48853b3db026SSarah Sharp
48863b3db026SSarah Sharp if (xhci_update_timeout_for_interface(xhci, udev,
48873b3db026SSarah Sharp intf->cur_altsetting,
48883b3db026SSarah Sharp state, &timeout))
48893b3db026SSarah Sharp return timeout;
48903b3db026SSarah Sharp }
48913b3db026SSarah Sharp return timeout;
48923b3db026SSarah Sharp }
48933b3db026SSarah Sharp
48943b3db026SSarah Sharp static int calculate_max_exit_latency(struct usb_device *udev,
48953b3db026SSarah Sharp enum usb3_link_state state_changed,
48963b3db026SSarah Sharp u16 hub_encoded_timeout)
48973b3db026SSarah Sharp {
48983b3db026SSarah Sharp unsigned long long u1_mel_us = 0;
48993b3db026SSarah Sharp unsigned long long u2_mel_us = 0;
49003b3db026SSarah Sharp unsigned long long mel_us = 0;
49013b3db026SSarah Sharp bool disabling_u1;
49023b3db026SSarah Sharp bool disabling_u2;
49033b3db026SSarah Sharp bool enabling_u1;
49043b3db026SSarah Sharp bool enabling_u2;
49053b3db026SSarah Sharp
49063b3db026SSarah Sharp disabling_u1 = (state_changed == USB3_LPM_U1 &&
49073b3db026SSarah Sharp hub_encoded_timeout == USB3_LPM_DISABLED);
49083b3db026SSarah Sharp disabling_u2 = (state_changed == USB3_LPM_U2 &&
49093b3db026SSarah Sharp hub_encoded_timeout == USB3_LPM_DISABLED);
49103b3db026SSarah Sharp
49113b3db026SSarah Sharp enabling_u1 = (state_changed == USB3_LPM_U1 &&
49123b3db026SSarah Sharp hub_encoded_timeout != USB3_LPM_DISABLED);
49133b3db026SSarah Sharp enabling_u2 = (state_changed == USB3_LPM_U2 &&
49143b3db026SSarah Sharp hub_encoded_timeout != USB3_LPM_DISABLED);
49153b3db026SSarah Sharp
49163b3db026SSarah Sharp /* If U1 was already enabled and we're not disabling it,
49173b3db026SSarah Sharp * or we're going to enable U1, account for the U1 max exit latency.
49183b3db026SSarah Sharp */
49193b3db026SSarah Sharp if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
49203b3db026SSarah Sharp enabling_u1)
49213b3db026SSarah Sharp u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
49223b3db026SSarah Sharp if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
49233b3db026SSarah Sharp enabling_u2)
49243b3db026SSarah Sharp u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
49253b3db026SSarah Sharp
4926f28fb27eSChangcheng Deng mel_us = max(u1_mel_us, u2_mel_us);
4927f28fb27eSChangcheng Deng
49283b3db026SSarah Sharp /* xHCI host controller max exit latency field is only 16 bits wide. */
49293b3db026SSarah Sharp if (mel_us > MAX_EXIT) {
49303b3db026SSarah Sharp dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
49313b3db026SSarah Sharp "is too big.\n", mel_us);
49323b3db026SSarah Sharp return -E2BIG;
49333b3db026SSarah Sharp }
49343b3db026SSarah Sharp return mel_us;
49353b3db026SSarah Sharp }
49363b3db026SSarah Sharp
49373b3db026SSarah Sharp /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
49383969384cSLu Baolu static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
49393b3db026SSarah Sharp struct usb_device *udev, enum usb3_link_state state)
49403b3db026SSarah Sharp {
49413b3db026SSarah Sharp struct xhci_hcd *xhci;
49420522b9a1SMathias Nyman struct xhci_port *port;
49433b3db026SSarah Sharp u16 hub_encoded_timeout;
49443b3db026SSarah Sharp int mel;
49453b3db026SSarah Sharp int ret;
49463b3db026SSarah Sharp
49473b3db026SSarah Sharp xhci = hcd_to_xhci(hcd);
49483b3db026SSarah Sharp /* The LPM timeout values are pretty host-controller specific, so don't
49493b3db026SSarah Sharp * enable hub-initiated timeouts unless the vendor has provided
49503b3db026SSarah Sharp * information about their timeout algorithm.
49513b3db026SSarah Sharp */
49523b3db026SSarah Sharp if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
49533b3db026SSarah Sharp !xhci->devs[udev->slot_id])
49543b3db026SSarah Sharp return USB3_LPM_DISABLED;
49553b3db026SSarah Sharp
4956424140d3SMathias Nyman if (xhci_check_tier_policy(xhci, udev, state) < 0)
4957424140d3SMathias Nyman return USB3_LPM_DISABLED;
4958424140d3SMathias Nyman
49590522b9a1SMathias Nyman /* If connected to a root port, check that the port can handle LPM */
49600522b9a1SMathias Nyman if (udev->parent && !udev->parent->parent) {
49610522b9a1SMathias Nyman port = xhci->usb3_rhub.ports[udev->portnum - 1];
49620522b9a1SMathias Nyman if (port->lpm_incapable)
49630522b9a1SMathias Nyman return USB3_LPM_DISABLED;
49640522b9a1SMathias Nyman }
49650522b9a1SMathias Nyman
49663b3db026SSarah Sharp hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
49673b3db026SSarah Sharp mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
49683b3db026SSarah Sharp if (mel < 0) {
49693b3db026SSarah Sharp /* Max Exit Latency is too big, disable LPM. */
49703b3db026SSarah Sharp hub_encoded_timeout = USB3_LPM_DISABLED;
49713b3db026SSarah Sharp mel = 0;
49723b3db026SSarah Sharp }
49733b3db026SSarah Sharp
49743b3db026SSarah Sharp ret = xhci_change_max_exit_latency(xhci, udev, mel);
49753b3db026SSarah Sharp if (ret)
49763b3db026SSarah Sharp return ret;
49773b3db026SSarah Sharp return hub_encoded_timeout;
49783b3db026SSarah Sharp }
49793b3db026SSarah Sharp
49803969384cSLu Baolu static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
49813b3db026SSarah Sharp struct usb_device *udev, enum usb3_link_state state)
49823b3db026SSarah Sharp {
49833b3db026SSarah Sharp struct xhci_hcd *xhci;
49843b3db026SSarah Sharp u16 mel;
49853b3db026SSarah Sharp
49863b3db026SSarah Sharp xhci = hcd_to_xhci(hcd);
49873b3db026SSarah Sharp if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
49883b3db026SSarah Sharp !xhci->devs[udev->slot_id])
49893b3db026SSarah Sharp return 0;
49903b3db026SSarah Sharp
49913b3db026SSarah Sharp mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4992f1cda54cSSaurabh Karajgaonkar return xhci_change_max_exit_latency(xhci, udev, mel);
49933b3db026SSarah Sharp }
4994b01bcbf7SSarah Sharp #else /* CONFIG_PM */
4995b01bcbf7SSarah Sharp
49963969384cSLu Baolu static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4997ceb6c9c8SRafael J. Wysocki struct usb_device *udev, int enable)
4998ceb6c9c8SRafael J. Wysocki {
4999ceb6c9c8SRafael J. Wysocki return 0;
5000ceb6c9c8SRafael J. Wysocki }
5001ceb6c9c8SRafael J. Wysocki
50023969384cSLu Baolu static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5003ceb6c9c8SRafael J. Wysocki {
5004ceb6c9c8SRafael J. Wysocki return 0;
5005ceb6c9c8SRafael J. Wysocki }
5006ceb6c9c8SRafael J. Wysocki
50073969384cSLu Baolu static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5008b01bcbf7SSarah Sharp struct usb_device *udev, enum usb3_link_state state)
5009b01bcbf7SSarah Sharp {
5010b01bcbf7SSarah Sharp return USB3_LPM_DISABLED;
5011b01bcbf7SSarah Sharp }
5012b01bcbf7SSarah Sharp
50133969384cSLu Baolu static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5014b01bcbf7SSarah Sharp struct usb_device *udev, enum usb3_link_state state)
5015b01bcbf7SSarah Sharp {
5016b01bcbf7SSarah Sharp return 0;
5017b01bcbf7SSarah Sharp }
5018b01bcbf7SSarah Sharp #endif /* CONFIG_PM */
5019b01bcbf7SSarah Sharp
50203b3db026SSarah Sharp /*-------------------------------------------------------------------------*/
50213b3db026SSarah Sharp
5022bc75fa38SAlex Chiang /* Once a hub descriptor is fetched for a device, we need to update the xHC's
5023bc75fa38SAlex Chiang * internal data structures for the device.
5024bc75fa38SAlex Chiang */
502523a3b8d5SMathias Nyman int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5026bc75fa38SAlex Chiang struct usb_tt *tt, gfp_t mem_flags)
5027bc75fa38SAlex Chiang {
5028bc75fa38SAlex Chiang struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5029bc75fa38SAlex Chiang struct xhci_virt_device *vdev;
5030bc75fa38SAlex Chiang struct xhci_command *config_cmd;
5031bc75fa38SAlex Chiang struct xhci_input_control_ctx *ctrl_ctx;
5032bc75fa38SAlex Chiang struct xhci_slot_ctx *slot_ctx;
5033bc75fa38SAlex Chiang unsigned long flags;
5034bc75fa38SAlex Chiang unsigned think_time;
5035bc75fa38SAlex Chiang int ret;
5036bc75fa38SAlex Chiang
5037bc75fa38SAlex Chiang /* Ignore root hubs */
5038bc75fa38SAlex Chiang if (!hdev->parent)
5039bc75fa38SAlex Chiang return 0;
5040bc75fa38SAlex Chiang
5041bc75fa38SAlex Chiang vdev = xhci->devs[hdev->slot_id];
5042bc75fa38SAlex Chiang if (!vdev) {
5043bc75fa38SAlex Chiang xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5044bc75fa38SAlex Chiang return -EINVAL;
5045bc75fa38SAlex Chiang }
504674e0b564SLu Baolu
504714d49b7aSMathias Nyman config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
504874e0b564SLu Baolu if (!config_cmd)
5049bc75fa38SAlex Chiang return -ENOMEM;
505074e0b564SLu Baolu
50514daf9df5SLin Wang ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
505292f8e767SSarah Sharp if (!ctrl_ctx) {
505392f8e767SSarah Sharp xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
505492f8e767SSarah Sharp __func__);
505592f8e767SSarah Sharp xhci_free_command(xhci, config_cmd);
505692f8e767SSarah Sharp return -ENOMEM;
505792f8e767SSarah Sharp }
5058bc75fa38SAlex Chiang
5059bc75fa38SAlex Chiang spin_lock_irqsave(&xhci->lock, flags);
5060839c817cSSarah Sharp if (hdev->speed == USB_SPEED_HIGH &&
5061839c817cSSarah Sharp xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5062839c817cSSarah Sharp xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5063839c817cSSarah Sharp xhci_free_command(xhci, config_cmd);
5064839c817cSSarah Sharp spin_unlock_irqrestore(&xhci->lock, flags);
5065839c817cSSarah Sharp return -ENOMEM;
5066839c817cSSarah Sharp }
5067839c817cSSarah Sharp
5068bc75fa38SAlex Chiang xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
506928ccd296SMatt Evans ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5070bc75fa38SAlex Chiang slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
507128ccd296SMatt Evans slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5072096b110aSChunfeng Yun /*
5073096b110aSChunfeng Yun * refer to section 6.2.2: MTT should be 0 for full speed hub,
5074096b110aSChunfeng Yun * but it may already be set to 1 when setting up an xHCI virtual
5075096b110aSChunfeng Yun * device, so clear it anyway.
5076096b110aSChunfeng Yun */
5077bc75fa38SAlex Chiang if (tt->multi)
507828ccd296SMatt Evans slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5079096b110aSChunfeng Yun else if (hdev->speed == USB_SPEED_FULL)
5080096b110aSChunfeng Yun slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5081096b110aSChunfeng Yun
5082bc75fa38SAlex Chiang if (xhci->hci_version > 0x95) {
5083bc75fa38SAlex Chiang xhci_dbg(xhci, "xHCI version %x needs hub "
5084bc75fa38SAlex Chiang "TT think time and number of ports\n",
5085bc75fa38SAlex Chiang (unsigned int) xhci->hci_version);
508628ccd296SMatt Evans slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/*
		 * Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
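		/*
		 * Worked example of the conversion below (assuming, per the
		 * usb_tt definition, that tt->think_time is in nanoseconds,
		 * 666 ns per 8 FS bit times): 666 ns maps to 0, 1332 ns to
		 * 1, 1998 ns to 2, and 2664 ns to 3.
		 */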
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/*
	 * Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(xhci_update_hub_device);

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
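	/*
	 * MFINDEX counts 125 us microframes; shifting right by 3
	 * converts that to 1 ms frame numbers.
	 */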
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	xhci->usb2_rhub.hcd = hcd;
	hcd->speed = HCD_USB2;
	hcd->self.root_hub->speed = USB_SPEED_HIGH;
	/*
	 * A USB 2.0 roothub under xHCI has an integrated TT
	 * (rate matching hub), as opposed to having an OHCI/UHCI
	 * companion controller.
	 */
	hcd->has_tt = 1;
}

static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	unsigned int minor_rev;

	/*
	 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
	 * should return 0x31 for sbrn, or that the minor revision
	 * is a two digit BCD containing minor and sub-minor numbers.
	 * This was later clarified in xHCI 1.2.
	 *
	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
	 * minor revision set to 0x1 instead of 0x10.
	 */
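	/*
	 * For example (derived from the switch below): min_rev 0x20
	 * yields minor_rev 2 and a USB 3.2 Gen 2x2 root hub, while
	 * min_rev 0x10 (or the quirky 0x1) yields minor_rev 1 and a
	 * USB 3.1 Gen 2x1 root hub.
	 */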
	if (xhci->usb3_rhub.min_rev == 0x1)
		minor_rev = 1;
	else
		minor_rev = xhci->usb3_rhub.min_rev / 0x10;

	switch (minor_rev) {
	case 2:
		hcd->speed = HCD_USB32;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->rx_lanes = 2;
		hcd->self.root_hub->tx_lanes = 2;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
		break;
	case 1:
		hcd->speed = HCD_USB31;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
		break;
	}
	xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
		  minor_rev, minor_rev ? "Enhanced " : "");

	xhci->usb3_rhub.hcd = hcd;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device *dev = hcd->self.sysdev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_hcd_init_usb3_data(xhci, hcd);
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->main_hcd = hcd;
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	/* xhci-plat or xhci-pci might have set max_interrupters already */
	if ((!xhci->max_interrupters) ||
	    xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
		xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	xhci->quirks |= quirks;

	if (get_quirks)
		get_quirks(dev, xhci);

	/*
	 * xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk makes such
	 * spurious events be ignored.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1. However, those xHCs don't actually
	 * support 64-bit address memory pointers. So, this driver clears
	 * the AC64 bit of xhci->hcc_params so that xhci_gen_setup() falls
	 * back to dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) below.
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/*
	 * Set dma_mask and coherent_dma_mask to 64-bits,
	 * if the xHC supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	if (xhci_hcd_is_usb3(hcd))
		xhci_hcd_init_usb3_data(xhci, hcd);
	else
		xhci_hcd_init_usb2_data(xhci, hcd);

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
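
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * bus glue such as xhci-pci or xhci-plat calls xhci_gen_setup() from its
 * hc_driver .reset hook, passing a callback that applies bus-specific
 * quirks:
 *
 *	static int xhci_foo_setup(struct usb_hcd *hcd)
 *	{
 *		return xhci_gen_setup(hcd, xhci_foo_quirks);
 *	}
 */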

static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
					  struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
				HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma =	xhci_map_urb_for_dma,
	.unmap_urb_for_dma =	xhci_unmap_urb_for_dma,
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_disable =	xhci_endpoint_disable,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
	.get_resuming_ports =	xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.clear_tt_buffer_complete =	xhci_clear_tt_buffer_complete,
};

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
		if (over->add_endpoint)
			drv->add_endpoint = over->add_endpoint;
		if (over->drop_endpoint)
			drv->drop_endpoint = over->drop_endpoint;
		if (over->check_bandwidth)
			drv->check_bandwidth = over->check_bandwidth;
		if (over->reset_bandwidth)
			drv->reset_bandwidth = over->reset_bandwidth;
		if (over->update_hub_device)
			drv->update_hub_device = over->update_hub_device;
		if (over->hub_control)
			drv->hub_control = over->hub_control;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
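
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * a glue driver embeds its private data and overrides just the hooks
 * it needs, then initializes its hc_driver once at module init:
 *
 *	static struct hc_driver xhci_foo_hc_driver;
 *	static const struct xhci_driver_overrides xhci_foo_overrides = {
 *		.extra_priv_size = sizeof(struct xhci_foo_priv),
 *		.reset = xhci_foo_setup,
 *	};
 *	...
 *	xhci_init_driver(&xhci_foo_hc_driver, &xhci_foo_overrides);
 */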

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
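	/*
	 * Each expression below counts 32-bit registers: 8*32/8 is eight
	 * 32-bit fields (32 bytes), and 256*32/8 is 256 doorbell registers
	 * (1024 bytes).
	 */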
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/*
	 * xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();
	xhci_dbc_init();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
	xhci_dbc_exit();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);