xref: /openbmc/linux/drivers/usb/host/xhci-ring.c (revision 9b9c2cd4)
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 /*
24  * Ring initialization rules:
25  * 1. Each segment is initialized to zero, except for link TRBs.
26  * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
27  *    Consumer Cycle State (CCS), depending on ring function.
28  * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29  *
30  * Ring behavior rules:
31  * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
32  *    least one free TRB in the ring.  This is useful if you want to turn that
33  *    into a link TRB and expand the ring.
34  * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35  *    link TRB, then load the pointer with the address in the link TRB.  If the
36  *    link TRB had its toggle bit set, you may need to update the ring cycle
37  *    state (see cycle bit rules).  You may have to do this multiple times
38  *    until you reach a non-link TRB.
39  * 3. A ring is full if enqueue++ (for the definition of increment above)
40  *    equals the dequeue pointer.
41  *
42  * Cycle bit rules:
43  * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44  *    in a link TRB, it must toggle the ring cycle state.
45  * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46  *    in a link TRB, it must toggle the ring cycle state.
47  *
48  * Producer rules:
49  * 1. Check if ring is full before you enqueue.
50  * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51  *    Update enqueue pointer between each write (which may update the ring
52  *    cycle state).
53  * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
54  *    and endpoint rings.  If HC is the producer for the event ring,
55  *    and it generates an interrupt according to interrupt modulation rules.
56  *
57  * Consumer rules:
58  * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
59  *    the TRB is owned by the consumer.
60  * 2. Update dequeue pointer (which may update the ring cycle state) and
61  *    continue processing TRBs until you reach a TRB which is not owned by you.
62  * 3. Notify the producer.  SW is the consumer for the event ring, and it
63  *   updates event ring dequeue pointer.  HC is the consumer for the command and
64  *   endpoint rings; it generates events on the event ring for these.
65  */
66 
67 #include <linux/scatterlist.h>
68 #include <linux/slab.h>
69 #include "xhci.h"
70 #include "xhci-trace.h"
71 
72 /*
73  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
74  * address of the TRB.
75  */
76 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
77 		union xhci_trb *trb)
78 {
79 	unsigned long segment_offset;
80 
81 	if (!seg || !trb || trb < seg->trbs)
82 		return 0;
83 	/* offset in TRBs */
84 	segment_offset = trb - seg->trbs;
85 	if (segment_offset >= TRBS_PER_SEGMENT)
86 		return 0;
87 	return seg->dma + (segment_offset * sizeof(*trb));
88 }
89 
90 /* Does this link TRB point to the first segment in a ring,
91  * or was the previous TRB the last TRB on the last segment in the ERST?
92  */
93 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
94 		struct xhci_segment *seg, union xhci_trb *trb)
95 {
96 	if (ring == xhci->event_ring)
97 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
98 			(seg->next == xhci->event_ring->first_seg);
99 	else
100 		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
101 }
102 
103 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
104  * segment?  I.e. would the updated event TRB pointer step off the end of the
105  * event seg?
106  */
107 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
108 		struct xhci_segment *seg, union xhci_trb *trb)
109 {
110 	if (ring == xhci->event_ring)
111 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
112 	else
113 		return TRB_TYPE_LINK_LE32(trb->link.control);
114 }
115 
116 static int enqueue_is_link_trb(struct xhci_ring *ring)
117 {
118 	struct xhci_link_trb *link = &ring->enqueue->link;
119 	return TRB_TYPE_LINK_LE32(link->control);
120 }
121 
122 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
123  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
124  * effect the ring dequeue or enqueue pointers.
125  */
126 static void next_trb(struct xhci_hcd *xhci,
127 		struct xhci_ring *ring,
128 		struct xhci_segment **seg,
129 		union xhci_trb **trb)
130 {
131 	if (last_trb(xhci, ring, *seg, *trb)) {
132 		*seg = (*seg)->next;
133 		*trb = ((*seg)->trbs);
134 	} else {
135 		(*trb)++;
136 	}
137 }
138 
139 /*
140  * See Cycle bit rules. SW is the consumer for the event ring only.
141  * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
142  */
143 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
144 {
145 	ring->deq_updates++;
146 
147 	/*
148 	 * If this is not event ring, and the dequeue pointer
149 	 * is not on a link TRB, there is one more usable TRB
150 	 */
151 	if (ring->type != TYPE_EVENT &&
152 			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
153 		ring->num_trbs_free++;
154 
155 	do {
156 		/*
157 		 * Update the dequeue pointer further if that was a link TRB or
158 		 * we're at the end of an event ring segment (which doesn't have
159 		 * link TRBS)
160 		 */
161 		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
162 			if (ring->type == TYPE_EVENT &&
163 					last_trb_on_last_seg(xhci, ring,
164 						ring->deq_seg, ring->dequeue)) {
165 				ring->cycle_state ^= 1;
166 			}
167 			ring->deq_seg = ring->deq_seg->next;
168 			ring->dequeue = ring->deq_seg->trbs;
169 		} else {
170 			ring->dequeue++;
171 		}
172 	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
173 }
174 
175 /*
176  * See Cycle bit rules. SW is the consumer for the event ring only.
177  * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
178  *
179  * If we've just enqueued a TRB that is in the middle of a TD (meaning the
180  * chain bit is set), then set the chain bit in all the following link TRBs.
181  * If we've enqueued the last TRB in a TD, make sure the following link TRBs
182  * have their chain bit cleared (so that each Link TRB is a separate TD).
183  *
184  * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
185  * set, but other sections talk about dealing with the chain bit set.  This was
186  * fixed in the 0.96 specification errata, but we have to assume that all 0.95
187  * xHCI hardware can't handle the chain bit being cleared on a link TRB.
188  *
189  * @more_trbs_coming:	Will you enqueue more TRBs before calling
190  *			prepare_transfer()?
191  */
192 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
193 			bool more_trbs_coming)
194 {
195 	u32 chain;
196 	union xhci_trb *next;
197 
198 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
199 	/* If this is not event ring, there is one less usable TRB */
200 	if (ring->type != TYPE_EVENT &&
201 			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
202 		ring->num_trbs_free--;
203 	next = ++(ring->enqueue);
204 
205 	ring->enq_updates++;
206 	/* Update the dequeue pointer further if that was a link TRB or we're at
207 	 * the end of an event ring segment (which doesn't have link TRBS)
208 	 */
209 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
210 		if (ring->type != TYPE_EVENT) {
211 			/*
212 			 * If the caller doesn't plan on enqueueing more
213 			 * TDs before ringing the doorbell, then we
214 			 * don't want to give the link TRB to the
215 			 * hardware just yet.  We'll give the link TRB
216 			 * back in prepare_ring() just before we enqueue
217 			 * the TD at the top of the ring.
218 			 */
219 			if (!chain && !more_trbs_coming)
220 				break;
221 
222 			/* If we're not dealing with 0.95 hardware or
223 			 * isoc rings on AMD 0.96 host,
224 			 * carry over the chain bit of the previous TRB
225 			 * (which may mean the chain bit is cleared).
226 			 */
227 			if (!(ring->type == TYPE_ISOC &&
228 					(xhci->quirks & XHCI_AMD_0x96_HOST))
229 						&& !xhci_link_trb_quirk(xhci)) {
230 				next->link.control &=
231 					cpu_to_le32(~TRB_CHAIN);
232 				next->link.control |=
233 					cpu_to_le32(chain);
234 			}
235 			/* Give this link TRB to the hardware */
236 			wmb();
237 			next->link.control ^= cpu_to_le32(TRB_CYCLE);
238 
239 			/* Toggle the cycle bit after the last ring segment. */
240 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
241 				ring->cycle_state ^= 1;
242 			}
243 		}
244 		ring->enq_seg = ring->enq_seg->next;
245 		ring->enqueue = ring->enq_seg->trbs;
246 		next = ring->enqueue;
247 	}
248 }
249 
250 /*
251  * Check to see if there's room to enqueue num_trbs on the ring and make sure
252  * enqueue pointer will not advance into dequeue segment. See rules above.
253  */
254 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
255 		unsigned int num_trbs)
256 {
257 	int num_trbs_in_deq_seg;
258 
259 	if (ring->num_trbs_free < num_trbs)
260 		return 0;
261 
262 	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
263 		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
264 		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
265 			return 0;
266 	}
267 
268 	return 1;
269 }
270 
271 /* Ring the host controller doorbell after placing a command on the ring */
272 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
273 {
274 	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
275 		return;
276 
277 	xhci_dbg(xhci, "// Ding dong!\n");
278 	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
279 	/* Flush PCI posted writes */
280 	readl(&xhci->dba->doorbell[0]);
281 }
282 
283 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
284 {
285 	u64 temp_64;
286 	int ret;
287 
288 	xhci_dbg(xhci, "Abort command ring\n");
289 
290 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
291 	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
292 	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
293 			&xhci->op_regs->cmd_ring);
294 
295 	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
296 	 * time the completion od all xHCI commands, including
297 	 * the Command Abort operation. If software doesn't see
298 	 * CRR negated in a timely manner (e.g. longer than 5
299 	 * seconds), then it should assume that the there are
300 	 * larger problems with the xHC and assert HCRST.
301 	 */
302 	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
303 			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
304 	if (ret < 0) {
305 		/* we are about to kill xhci, give it one more chance */
306 		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
307 			      &xhci->op_regs->cmd_ring);
308 		udelay(1000);
309 		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
310 				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
311 		if (ret == 0)
312 			return 0;
313 
314 		xhci_err(xhci, "Stopped the command ring failed, "
315 				"maybe the host is dead\n");
316 		xhci->xhc_state |= XHCI_STATE_DYING;
317 		xhci_quiesce(xhci);
318 		xhci_halt(xhci);
319 		return -ESHUTDOWN;
320 	}
321 
322 	return 0;
323 }
324 
325 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
326 		unsigned int slot_id,
327 		unsigned int ep_index,
328 		unsigned int stream_id)
329 {
330 	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
331 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
332 	unsigned int ep_state = ep->ep_state;
333 
334 	/* Don't ring the doorbell for this endpoint if there are pending
335 	 * cancellations because we don't want to interrupt processing.
336 	 * We don't want to restart any stream rings if there's a set dequeue
337 	 * pointer command pending because the device can choose to start any
338 	 * stream once the endpoint is on the HW schedule.
339 	 */
340 	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
341 	    (ep_state & EP_HALTED))
342 		return;
343 	writel(DB_VALUE(ep_index, stream_id), db_addr);
344 	/* The CPU has better things to do at this point than wait for a
345 	 * write-posting flush.  It'll get there soon enough.
346 	 */
347 }
348 
349 /* Ring the doorbell for any rings with pending URBs */
350 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
351 		unsigned int slot_id,
352 		unsigned int ep_index)
353 {
354 	unsigned int stream_id;
355 	struct xhci_virt_ep *ep;
356 
357 	ep = &xhci->devs[slot_id]->eps[ep_index];
358 
359 	/* A ring has pending URBs if its TD list is not empty */
360 	if (!(ep->ep_state & EP_HAS_STREAMS)) {
361 		if (ep->ring && !(list_empty(&ep->ring->td_list)))
362 			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
363 		return;
364 	}
365 
366 	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
367 			stream_id++) {
368 		struct xhci_stream_info *stream_info = ep->stream_info;
369 		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
370 			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
371 						stream_id);
372 	}
373 }
374 
375 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
376 		unsigned int slot_id, unsigned int ep_index,
377 		unsigned int stream_id)
378 {
379 	struct xhci_virt_ep *ep;
380 
381 	ep = &xhci->devs[slot_id]->eps[ep_index];
382 	/* Common case: no streams */
383 	if (!(ep->ep_state & EP_HAS_STREAMS))
384 		return ep->ring;
385 
386 	if (stream_id == 0) {
387 		xhci_warn(xhci,
388 				"WARN: Slot ID %u, ep index %u has streams, "
389 				"but URB has no stream ID.\n",
390 				slot_id, ep_index);
391 		return NULL;
392 	}
393 
394 	if (stream_id < ep->stream_info->num_streams)
395 		return ep->stream_info->stream_rings[stream_id];
396 
397 	xhci_warn(xhci,
398 			"WARN: Slot ID %u, ep index %u has "
399 			"stream IDs 1 to %u allocated, "
400 			"but stream ID %u is requested.\n",
401 			slot_id, ep_index,
402 			ep->stream_info->num_streams - 1,
403 			stream_id);
404 	return NULL;
405 }
406 
407 /* Get the right ring for the given URB.
408  * If the endpoint supports streams, boundary check the URB's stream ID.
409  * If the endpoint doesn't support streams, return the singular endpoint ring.
410  */
411 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
412 		struct urb *urb)
413 {
414 	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
415 		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
416 }
417 
418 /*
419  * Move the xHC's endpoint ring dequeue pointer past cur_td.
420  * Record the new state of the xHC's endpoint ring dequeue segment,
421  * dequeue pointer, and new consumer cycle state in state.
422  * Update our internal representation of the ring's dequeue pointer.
423  *
424  * We do this in three jumps:
425  *  - First we update our new ring state to be the same as when the xHC stopped.
426  *  - Then we traverse the ring to find the segment that contains
427  *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
428  *    any link TRBs with the toggle cycle bit set.
429  *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
430  *    if we've moved it past a link TRB with the toggle cycle bit set.
431  *
432  * Some of the uses of xhci_generic_trb are grotty, but if they're done
433  * with correct __le32 accesses they should work fine.  Only users of this are
434  * in here.
435  */
436 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
437 		unsigned int slot_id, unsigned int ep_index,
438 		unsigned int stream_id, struct xhci_td *cur_td,
439 		struct xhci_dequeue_state *state)
440 {
441 	struct xhci_virt_device *dev = xhci->devs[slot_id];
442 	struct xhci_virt_ep *ep = &dev->eps[ep_index];
443 	struct xhci_ring *ep_ring;
444 	struct xhci_segment *new_seg;
445 	union xhci_trb *new_deq;
446 	dma_addr_t addr;
447 	u64 hw_dequeue;
448 	bool cycle_found = false;
449 	bool td_last_trb_found = false;
450 
451 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
452 			ep_index, stream_id);
453 	if (!ep_ring) {
454 		xhci_warn(xhci, "WARN can't find new dequeue state "
455 				"for invalid stream ID %u.\n",
456 				stream_id);
457 		return;
458 	}
459 
460 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
461 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
462 			"Finding endpoint context");
463 	/* 4.6.9 the css flag is written to the stream context for streams */
464 	if (ep->ep_state & EP_HAS_STREAMS) {
465 		struct xhci_stream_ctx *ctx =
466 			&ep->stream_info->stream_ctx_array[stream_id];
467 		hw_dequeue = le64_to_cpu(ctx->stream_ring);
468 	} else {
469 		struct xhci_ep_ctx *ep_ctx
470 			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
471 		hw_dequeue = le64_to_cpu(ep_ctx->deq);
472 	}
473 
474 	new_seg = ep_ring->deq_seg;
475 	new_deq = ep_ring->dequeue;
476 	state->new_cycle_state = hw_dequeue & 0x1;
477 
478 	/*
479 	 * We want to find the pointer, segment and cycle state of the new trb
480 	 * (the one after current TD's last_trb). We know the cycle state at
481 	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
482 	 * found.
483 	 */
484 	do {
485 		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
486 		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
487 			cycle_found = true;
488 			if (td_last_trb_found)
489 				break;
490 		}
491 		if (new_deq == cur_td->last_trb)
492 			td_last_trb_found = true;
493 
494 		if (cycle_found &&
495 		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
496 		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
497 			state->new_cycle_state ^= 0x1;
498 
499 		next_trb(xhci, ep_ring, &new_seg, &new_deq);
500 
501 		/* Search wrapped around, bail out */
502 		if (new_deq == ep->ring->dequeue) {
503 			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
504 			state->new_deq_seg = NULL;
505 			state->new_deq_ptr = NULL;
506 			return;
507 		}
508 
509 	} while (!cycle_found || !td_last_trb_found);
510 
511 	state->new_deq_seg = new_seg;
512 	state->new_deq_ptr = new_deq;
513 
514 	/* Don't update the ring cycle state for the producer (us). */
515 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
516 			"Cycle state = 0x%x", state->new_cycle_state);
517 
518 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
519 			"New dequeue segment = %p (virtual)",
520 			state->new_deq_seg);
521 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
522 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
523 			"New dequeue pointer = 0x%llx (DMA)",
524 			(unsigned long long) addr);
525 }
526 
527 /* flip_cycle means flip the cycle bit of all but the first and last TRB.
528  * (The last TRB actually points to the ring enqueue pointer, which is not part
529  * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
530  */
531 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
532 		struct xhci_td *cur_td, bool flip_cycle)
533 {
534 	struct xhci_segment *cur_seg;
535 	union xhci_trb *cur_trb;
536 
537 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
538 			true;
539 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
540 		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
541 			/* Unchain any chained Link TRBs, but
542 			 * leave the pointers intact.
543 			 */
544 			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
545 			/* Flip the cycle bit (link TRBs can't be the first
546 			 * or last TRB).
547 			 */
548 			if (flip_cycle)
549 				cur_trb->generic.field[3] ^=
550 					cpu_to_le32(TRB_CYCLE);
551 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
552 					"Cancel (unchain) link TRB");
553 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
554 					"Address = %p (0x%llx dma); "
555 					"in seg %p (0x%llx dma)",
556 					cur_trb,
557 					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
558 					cur_seg,
559 					(unsigned long long)cur_seg->dma);
560 		} else {
561 			cur_trb->generic.field[0] = 0;
562 			cur_trb->generic.field[1] = 0;
563 			cur_trb->generic.field[2] = 0;
564 			/* Preserve only the cycle bit of this TRB */
565 			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
566 			/* Flip the cycle bit except on the first or last TRB */
567 			if (flip_cycle && cur_trb != cur_td->first_trb &&
568 					cur_trb != cur_td->last_trb)
569 				cur_trb->generic.field[3] ^=
570 					cpu_to_le32(TRB_CYCLE);
571 			cur_trb->generic.field[3] |= cpu_to_le32(
572 				TRB_TYPE(TRB_TR_NOOP));
573 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
574 					"TRB to noop at offset 0x%llx",
575 					(unsigned long long)
576 					xhci_trb_virt_to_dma(cur_seg, cur_trb));
577 		}
578 		if (cur_trb == cur_td->last_trb)
579 			break;
580 	}
581 }
582 
583 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
584 		struct xhci_virt_ep *ep)
585 {
586 	ep->ep_state &= ~EP_HALT_PENDING;
587 	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
588 	 * timer is running on another CPU, we don't decrement stop_cmds_pending
589 	 * (since we didn't successfully stop the watchdog timer).
590 	 */
591 	if (del_timer(&ep->stop_cmd_timer))
592 		ep->stop_cmds_pending--;
593 }
594 
595 /* Must be called with xhci->lock held in interrupt context */
596 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
597 		struct xhci_td *cur_td, int status)
598 {
599 	struct usb_hcd *hcd;
600 	struct urb	*urb;
601 	struct urb_priv	*urb_priv;
602 
603 	urb = cur_td->urb;
604 	urb_priv = urb->hcpriv;
605 	urb_priv->td_cnt++;
606 	hcd = bus_to_hcd(urb->dev->bus);
607 
608 	/* Only giveback urb when this is the last td in urb */
609 	if (urb_priv->td_cnt == urb_priv->length) {
610 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
611 			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
612 			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
613 				if (xhci->quirks & XHCI_AMD_PLL_FIX)
614 					usb_amd_quirk_pll_enable();
615 			}
616 		}
617 		usb_hcd_unlink_urb_from_ep(hcd, urb);
618 
619 		spin_unlock(&xhci->lock);
620 		usb_hcd_giveback_urb(hcd, urb, status);
621 		xhci_urb_free_priv(urb_priv);
622 		spin_lock(&xhci->lock);
623 	}
624 }
625 
626 /*
627  * When we get a command completion for a Stop Endpoint Command, we need to
628  * unlink any cancelled TDs from the ring.  There are two ways to do that:
629  *
630  *  1. If the HW was in the middle of processing the TD that needs to be
631  *     cancelled, then we must move the ring's dequeue pointer past the last TRB
632  *     in the TD with a Set Dequeue Pointer Command.
633  *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
634  *     bit cleared) so that the HW will skip over them.
635  */
636 static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
637 		union xhci_trb *trb, struct xhci_event_cmd *event)
638 {
639 	unsigned int ep_index;
640 	struct xhci_ring *ep_ring;
641 	struct xhci_virt_ep *ep;
642 	struct list_head *entry;
643 	struct xhci_td *cur_td = NULL;
644 	struct xhci_td *last_unlinked_td;
645 
646 	struct xhci_dequeue_state deq_state;
647 
648 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
649 		if (!xhci->devs[slot_id])
650 			xhci_warn(xhci, "Stop endpoint command "
651 				"completion for disabled slot %u\n",
652 				slot_id);
653 		return;
654 	}
655 
656 	memset(&deq_state, 0, sizeof(deq_state));
657 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
658 	ep = &xhci->devs[slot_id]->eps[ep_index];
659 
660 	if (list_empty(&ep->cancelled_td_list)) {
661 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
662 		ep->stopped_td = NULL;
663 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
664 		return;
665 	}
666 
667 	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
668 	 * We have the xHCI lock, so nothing can modify this list until we drop
669 	 * it.  We're also in the event handler, so we can't get re-interrupted
670 	 * if another Stop Endpoint command completes
671 	 */
672 	list_for_each(entry, &ep->cancelled_td_list) {
673 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
674 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
675 				"Removing canceled TD starting at 0x%llx (dma).",
676 				(unsigned long long)xhci_trb_virt_to_dma(
677 					cur_td->start_seg, cur_td->first_trb));
678 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
679 		if (!ep_ring) {
680 			/* This shouldn't happen unless a driver is mucking
681 			 * with the stream ID after submission.  This will
682 			 * leave the TD on the hardware ring, and the hardware
683 			 * will try to execute it, and may access a buffer
684 			 * that has already been freed.  In the best case, the
685 			 * hardware will execute it, and the event handler will
686 			 * ignore the completion event for that TD, since it was
687 			 * removed from the td_list for that endpoint.  In
688 			 * short, don't muck with the stream ID after
689 			 * submission.
690 			 */
691 			xhci_warn(xhci, "WARN Cancelled URB %p "
692 					"has invalid stream ID %u.\n",
693 					cur_td->urb,
694 					cur_td->urb->stream_id);
695 			goto remove_finished_td;
696 		}
697 		/*
698 		 * If we stopped on the TD we need to cancel, then we have to
699 		 * move the xHC endpoint ring dequeue pointer past this TD.
700 		 */
701 		if (cur_td == ep->stopped_td)
702 			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
703 					cur_td->urb->stream_id,
704 					cur_td, &deq_state);
705 		else
706 			td_to_noop(xhci, ep_ring, cur_td, false);
707 remove_finished_td:
708 		/*
709 		 * The event handler won't see a completion for this TD anymore,
710 		 * so remove it from the endpoint ring's TD list.  Keep it in
711 		 * the cancelled TD list for URB completion later.
712 		 */
713 		list_del_init(&cur_td->td_list);
714 	}
715 	last_unlinked_td = cur_td;
716 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
717 
718 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
719 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
720 		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
721 				ep->stopped_td->urb->stream_id, &deq_state);
722 		xhci_ring_cmd_db(xhci);
723 	} else {
724 		/* Otherwise ring the doorbell(s) to restart queued transfers */
725 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
726 	}
727 
728 	ep->stopped_td = NULL;
729 
730 	/*
731 	 * Drop the lock and complete the URBs in the cancelled TD list.
732 	 * New TDs to be cancelled might be added to the end of the list before
733 	 * we can complete all the URBs for the TDs we already unlinked.
734 	 * So stop when we've completed the URB for the last TD we unlinked.
735 	 */
736 	do {
737 		cur_td = list_entry(ep->cancelled_td_list.next,
738 				struct xhci_td, cancelled_td_list);
739 		list_del_init(&cur_td->cancelled_td_list);
740 
741 		/* Clean up the cancelled URB */
742 		/* Doesn't matter what we pass for status, since the core will
743 		 * just overwrite it (because the URB has been unlinked).
744 		 */
745 		xhci_giveback_urb_in_irq(xhci, cur_td, 0);
746 
747 		/* Stop processing the cancelled list if the watchdog timer is
748 		 * running.
749 		 */
750 		if (xhci->xhc_state & XHCI_STATE_DYING)
751 			return;
752 	} while (cur_td != last_unlinked_td);
753 
754 	/* Return to the event handler with xhci->lock re-acquired */
755 }
756 
757 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
758 {
759 	struct xhci_td *cur_td;
760 
761 	while (!list_empty(&ring->td_list)) {
762 		cur_td = list_first_entry(&ring->td_list,
763 				struct xhci_td, td_list);
764 		list_del_init(&cur_td->td_list);
765 		if (!list_empty(&cur_td->cancelled_td_list))
766 			list_del_init(&cur_td->cancelled_td_list);
767 		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
768 	}
769 }
770 
771 static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
772 		int slot_id, int ep_index)
773 {
774 	struct xhci_td *cur_td;
775 	struct xhci_virt_ep *ep;
776 	struct xhci_ring *ring;
777 
778 	ep = &xhci->devs[slot_id]->eps[ep_index];
779 	if ((ep->ep_state & EP_HAS_STREAMS) ||
780 			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
781 		int stream_id;
782 
783 		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
784 				stream_id++) {
785 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
786 					"Killing URBs for slot ID %u, ep index %u, stream %u",
787 					slot_id, ep_index, stream_id + 1);
788 			xhci_kill_ring_urbs(xhci,
789 					ep->stream_info->stream_rings[stream_id]);
790 		}
791 	} else {
792 		ring = ep->ring;
793 		if (!ring)
794 			return;
795 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
796 				"Killing URBs for slot ID %u, ep index %u",
797 				slot_id, ep_index);
798 		xhci_kill_ring_urbs(xhci, ring);
799 	}
800 	while (!list_empty(&ep->cancelled_td_list)) {
801 		cur_td = list_first_entry(&ep->cancelled_td_list,
802 				struct xhci_td, cancelled_td_list);
803 		list_del_init(&cur_td->cancelled_td_list);
804 		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
805 	}
806 }
807 
808 /* Watchdog timer function for when a stop endpoint command fails to complete.
809  * In this case, we assume the host controller is broken or dying or dead.  The
810  * host may still be completing some other events, so we have to be careful to
811  * let the event ring handler and the URB dequeueing/enqueueing functions know
812  * through xhci->state.
813  *
814  * The timer may also fire if the host takes a very long time to respond to the
815  * command, and the stop endpoint command completion handler cannot delete the
816  * timer before the timer function is called.  Another endpoint cancellation may
817  * sneak in before the timer function can grab the lock, and that may queue
818  * another stop endpoint command and add the timer back.  So we cannot use a
819  * simple flag to say whether there is a pending stop endpoint command for a
820  * particular endpoint.
821  *
822  * Instead we use a combination of that flag and a counter for the number of
823  * pending stop endpoint commands.  If the timer is the tail end of the last
824  * stop endpoint command, and the endpoint's command is still pending, we assume
825  * the host is dying.
826  */
827 void xhci_stop_endpoint_command_watchdog(unsigned long arg)
828 {
829 	struct xhci_hcd *xhci;
830 	struct xhci_virt_ep *ep;
831 	int ret, i, j;
832 	unsigned long flags;
833 
834 	ep = (struct xhci_virt_ep *) arg;
835 	xhci = ep->xhci;
836 
837 	spin_lock_irqsave(&xhci->lock, flags);
838 
839 	ep->stop_cmds_pending--;
840 	if (xhci->xhc_state & XHCI_STATE_DYING) {
841 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
842 				"Stop EP timer ran, but another timer marked "
843 				"xHCI as DYING, exiting.");
844 		spin_unlock_irqrestore(&xhci->lock, flags);
845 		return;
846 	}
847 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
848 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
849 				"Stop EP timer ran, but no command pending, "
850 				"exiting.");
851 		spin_unlock_irqrestore(&xhci->lock, flags);
852 		return;
853 	}
854 
855 	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
856 	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
857 	/* Oops, HC is dead or dying or at least not responding to the stop
858 	 * endpoint command.
859 	 */
860 	xhci->xhc_state |= XHCI_STATE_DYING;
861 	/* Disable interrupts from the host controller and start halting it */
862 	xhci_quiesce(xhci);
863 	spin_unlock_irqrestore(&xhci->lock, flags);
864 
865 	ret = xhci_halt(xhci);
866 
867 	spin_lock_irqsave(&xhci->lock, flags);
868 	if (ret < 0) {
869 		/* This is bad; the host is not responding to commands and it's
870 		 * not allowing itself to be halted.  At least interrupts are
871 		 * disabled. If we call usb_hc_died(), it will attempt to
872 		 * disconnect all device drivers under this host.  Those
873 		 * disconnect() methods will wait for all URBs to be unlinked,
874 		 * so we must complete them.
875 		 */
876 		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
877 		xhci_warn(xhci, "Completing active URBs anyway.\n");
878 		/* We could turn all TDs on the rings to no-ops.  This won't
879 		 * help if the host has cached part of the ring, and is slow if
880 		 * we want to preserve the cycle bit.  Skip it and hope the host
881 		 * doesn't touch the memory.
882 		 */
883 	}
884 	for (i = 0; i < MAX_HC_SLOTS; i++) {
885 		if (!xhci->devs[i])
886 			continue;
887 		for (j = 0; j < 31; j++)
888 			xhci_kill_endpoint_urbs(xhci, i, j);
889 	}
890 	spin_unlock_irqrestore(&xhci->lock, flags);
891 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
892 			"Calling usb_hc_died()");
893 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
894 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
895 			"xHCI host controller is dead.");
896 }
897 
898 
899 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
900 		struct xhci_virt_device *dev,
901 		struct xhci_ring *ep_ring,
902 		unsigned int ep_index)
903 {
904 	union xhci_trb *dequeue_temp;
905 	int num_trbs_free_temp;
906 	bool revert = false;
907 
908 	num_trbs_free_temp = ep_ring->num_trbs_free;
909 	dequeue_temp = ep_ring->dequeue;
910 
911 	/* If we get two back-to-back stalls, and the first stalled transfer
912 	 * ends just before a link TRB, the dequeue pointer will be left on
913 	 * the link TRB by the code in the while loop.  So we have to update
914 	 * the dequeue pointer one segment further, or we'll jump off
915 	 * the segment into la-la-land.
916 	 */
917 	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
918 		ep_ring->deq_seg = ep_ring->deq_seg->next;
919 		ep_ring->dequeue = ep_ring->deq_seg->trbs;
920 	}
921 
922 	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
923 		/* We have more usable TRBs */
924 		ep_ring->num_trbs_free++;
925 		ep_ring->dequeue++;
926 		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
927 				ep_ring->dequeue)) {
928 			if (ep_ring->dequeue ==
929 					dev->eps[ep_index].queued_deq_ptr)
930 				break;
931 			ep_ring->deq_seg = ep_ring->deq_seg->next;
932 			ep_ring->dequeue = ep_ring->deq_seg->trbs;
933 		}
934 		if (ep_ring->dequeue == dequeue_temp) {
935 			revert = true;
936 			break;
937 		}
938 	}
939 
940 	if (revert) {
941 		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
942 		ep_ring->num_trbs_free = num_trbs_free_temp;
943 	}
944 }
945 
946 /*
947  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
948  * we need to clear the set deq pending flag in the endpoint ring state, so that
949  * the TD queueing code can ring the doorbell again.  We also need to ring the
950  * endpoint doorbell to restart the ring, but only if there aren't more
951  * cancellations pending.
952  */
953 static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
954 		union xhci_trb *trb, u32 cmd_comp_code)
955 {
956 	unsigned int ep_index;
957 	unsigned int stream_id;
958 	struct xhci_ring *ep_ring;
959 	struct xhci_virt_device *dev;
960 	struct xhci_virt_ep *ep;
961 	struct xhci_ep_ctx *ep_ctx;
962 	struct xhci_slot_ctx *slot_ctx;
963 
964 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
965 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
966 	dev = xhci->devs[slot_id];
967 	ep = &dev->eps[ep_index];
968 
969 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
970 	if (!ep_ring) {
971 		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
972 				stream_id);
973 		/* XXX: Harmless??? */
974 		goto cleanup;
975 	}
976 
977 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
978 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
979 
980 	if (cmd_comp_code != COMP_SUCCESS) {
981 		unsigned int ep_state;
982 		unsigned int slot_state;
983 
984 		switch (cmd_comp_code) {
985 		case COMP_TRB_ERR:
986 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
987 			break;
988 		case COMP_CTX_STATE:
989 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
990 			ep_state = le32_to_cpu(ep_ctx->ep_info);
991 			ep_state &= EP_STATE_MASK;
992 			slot_state = le32_to_cpu(slot_ctx->dev_state);
993 			slot_state = GET_SLOT_STATE(slot_state);
994 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
995 					"Slot state = %u, EP state = %u",
996 					slot_state, ep_state);
997 			break;
998 		case COMP_EBADSLT:
999 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1000 					slot_id);
1001 			break;
1002 		default:
1003 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1004 					cmd_comp_code);
1005 			break;
1006 		}
1007 		/* OK what do we do now?  The endpoint state is hosed, and we
1008 		 * should never get to this point if the synchronization between
1009 		 * queueing, and endpoint state are correct.  This might happen
1010 		 * if the device gets disconnected after we've finished
1011 		 * cancelling URBs, which might not be an error...
1012 		 */
1013 	} else {
1014 		u64 deq;
1015 		/* 4.6.10 deq ptr is written to the stream ctx for streams */
1016 		if (ep->ep_state & EP_HAS_STREAMS) {
1017 			struct xhci_stream_ctx *ctx =
1018 				&ep->stream_info->stream_ctx_array[stream_id];
1019 			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1020 		} else {
1021 			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1022 		}
1023 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1024 			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1025 		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1026 					 ep->queued_deq_ptr) == deq) {
1027 			/* Update the ring's dequeue segment and dequeue pointer
1028 			 * to reflect the new position.
1029 			 */
1030 			update_ring_for_set_deq_completion(xhci, dev,
1031 				ep_ring, ep_index);
1032 		} else {
1033 			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1034 			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1035 				  ep->queued_deq_seg, ep->queued_deq_ptr);
1036 		}
1037 	}
1038 
1039 cleanup:
1040 	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1041 	dev->eps[ep_index].queued_deq_seg = NULL;
1042 	dev->eps[ep_index].queued_deq_ptr = NULL;
1043 	/* Restart any rings with pending URBs */
1044 	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1045 }
1046 
1047 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1048 		union xhci_trb *trb, u32 cmd_comp_code)
1049 {
1050 	unsigned int ep_index;
1051 
1052 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1053 	/* This command will only fail if the endpoint wasn't halted,
1054 	 * but we don't care.
1055 	 */
1056 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1057 		"Ignoring reset ep completion code of %u", cmd_comp_code);
1058 
1059 	/* HW with the reset endpoint quirk needs to have a configure endpoint
1060 	 * command complete before the endpoint can be used.  Queue that here
1061 	 * because the HW can't handle two commands being queued in a row.
1062 	 */
1063 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1064 		struct xhci_command *command;
1065 		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1066 		if (!command) {
1067 			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
1068 			return;
1069 		}
1070 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1071 				"Queueing configure endpoint command");
1072 		xhci_queue_configure_endpoint(xhci, command,
1073 				xhci->devs[slot_id]->in_ctx->dma, slot_id,
1074 				false);
1075 		xhci_ring_cmd_db(xhci);
1076 	} else {
1077 		/* Clear our internal halted state */
1078 		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1079 	}
1080 }
1081 
1082 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1083 		u32 cmd_comp_code)
1084 {
1085 	if (cmd_comp_code == COMP_SUCCESS)
1086 		xhci->slot_id = slot_id;
1087 	else
1088 		xhci->slot_id = 0;
1089 }
1090 
1091 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1092 {
1093 	struct xhci_virt_device *virt_dev;
1094 
1095 	virt_dev = xhci->devs[slot_id];
1096 	if (!virt_dev)
1097 		return;
1098 	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1099 		/* Delete default control endpoint resources */
1100 		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1101 	xhci_free_virt_device(xhci, slot_id);
1102 }
1103 
1104 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1105 		struct xhci_event_cmd *event, u32 cmd_comp_code)
1106 {
1107 	struct xhci_virt_device *virt_dev;
1108 	struct xhci_input_control_ctx *ctrl_ctx;
1109 	unsigned int ep_index;
1110 	unsigned int ep_state;
1111 	u32 add_flags, drop_flags;
1112 
1113 	/*
1114 	 * Configure endpoint commands can come from the USB core
1115 	 * configuration or alt setting changes, or because the HW
1116 	 * needed an extra configure endpoint command after a reset
1117 	 * endpoint command or streams were being configured.
1118 	 * If the command was for a halted endpoint, the xHCI driver
1119 	 * is not waiting on the configure endpoint command.
1120 	 */
1121 	virt_dev = xhci->devs[slot_id];
1122 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1123 	if (!ctrl_ctx) {
1124 		xhci_warn(xhci, "Could not get input context, bad type.\n");
1125 		return;
1126 	}
1127 
1128 	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1129 	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1130 	/* Input ctx add_flags are the endpoint index plus one */
1131 	ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1132 
1133 	/* A usb_set_interface() call directly after clearing a halted
1134 	 * condition may race on this quirky hardware.  Not worth
1135 	 * worrying about, since this is prototype hardware.  Not sure
1136 	 * if this will work for streams, but streams support was
1137 	 * untested on this prototype.
1138 	 */
1139 	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1140 			ep_index != (unsigned int) -1 &&
1141 			add_flags - SLOT_FLAG == drop_flags) {
1142 		ep_state = virt_dev->eps[ep_index].ep_state;
1143 		if (!(ep_state & EP_HALTED))
1144 			return;
1145 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1146 				"Completed config ep cmd - "
1147 				"last ep index = %d, state = %d",
1148 				ep_index, ep_state);
1149 		/* Clear internal halted state and restart ring(s) */
1150 		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1151 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1152 		return;
1153 	}
1154 	return;
1155 }
1156 
1157 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1158 		struct xhci_event_cmd *event)
1159 {
1160 	xhci_dbg(xhci, "Completed reset device command.\n");
1161 	if (!xhci->devs[slot_id])
1162 		xhci_warn(xhci, "Reset device command completion "
1163 				"for disabled slot %u\n", slot_id);
1164 }
1165 
1166 static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1167 		struct xhci_event_cmd *event)
1168 {
1169 	if (!(xhci->quirks & XHCI_NEC_HOST)) {
1170 		xhci->error_bitmask |= 1 << 6;
1171 		return;
1172 	}
1173 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1174 			"NEC firmware version %2x.%02x",
1175 			NEC_FW_MAJOR(le32_to_cpu(event->status)),
1176 			NEC_FW_MINOR(le32_to_cpu(event->status)));
1177 }
1178 
1179 static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1180 {
1181 	list_del(&cmd->cmd_list);
1182 
1183 	if (cmd->completion) {
1184 		cmd->status = status;
1185 		complete(cmd->completion);
1186 	} else {
1187 		kfree(cmd);
1188 	}
1189 }
1190 
1191 void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1192 {
1193 	struct xhci_command *cur_cmd, *tmp_cmd;
1194 	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1195 		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
1196 }
1197 
1198 /*
1199  * Turn all commands on command ring with status set to "aborted" to no-op trbs.
1200  * If there are other commands waiting then restart the ring and kick the timer.
1201  * This must be called with command ring stopped and xhci->lock held.
1202  */
1203 static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1204 					 struct xhci_command *cur_cmd)
1205 {
1206 	struct xhci_command *i_cmd, *tmp_cmd;
1207 	u32 cycle_state;
1208 
1209 	/* Turn all aborted commands in list to no-ops, then restart */
1210 	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
1211 				 cmd_list) {
1212 
1213 		if (i_cmd->status != COMP_CMD_ABORT)
1214 			continue;
1215 
1216 		i_cmd->status = COMP_CMD_STOP;
1217 
1218 		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
1219 			 i_cmd->command_trb);
1220 		/* get cycle state from the original cmd trb */
1221 		cycle_state = le32_to_cpu(
1222 			i_cmd->command_trb->generic.field[3]) &	TRB_CYCLE;
1223 		/* modify the command trb to no-op command */
1224 		i_cmd->command_trb->generic.field[0] = 0;
1225 		i_cmd->command_trb->generic.field[1] = 0;
1226 		i_cmd->command_trb->generic.field[2] = 0;
1227 		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
1228 			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1229 
1230 		/*
1231 		 * caller waiting for completion is called when command
1232 		 *  completion event is received for these no-op commands
1233 		 */
1234 	}
1235 
1236 	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1237 
1238 	/* ring command ring doorbell to restart the command ring */
1239 	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
1240 	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
1241 		xhci->current_cmd = cur_cmd;
1242 		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1243 		xhci_ring_cmd_db(xhci);
1244 	}
1245 	return;
1246 }
1247 
1248 
1249 void xhci_handle_command_timeout(unsigned long data)
1250 {
1251 	struct xhci_hcd *xhci;
1252 	int ret;
1253 	unsigned long flags;
1254 	u64 hw_ring_state;
1255 	struct xhci_command *cur_cmd = NULL;
1256 	xhci = (struct xhci_hcd *) data;
1257 
1258 	/* mark this command to be cancelled */
1259 	spin_lock_irqsave(&xhci->lock, flags);
1260 	if (xhci->current_cmd) {
1261 		cur_cmd = xhci->current_cmd;
1262 		cur_cmd->status = COMP_CMD_ABORT;
1263 	}
1264 
1265 
1266 	/* Make sure command ring is running before aborting it */
1267 	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1268 	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1269 	    (hw_ring_state & CMD_RING_RUNNING))  {
1270 
1271 		spin_unlock_irqrestore(&xhci->lock, flags);
1272 		xhci_dbg(xhci, "Command timeout\n");
1273 		ret = xhci_abort_cmd_ring(xhci);
1274 		if (unlikely(ret == -ESHUTDOWN)) {
1275 			xhci_err(xhci, "Abort command ring failed\n");
1276 			xhci_cleanup_command_queue(xhci);
1277 			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1278 			xhci_dbg(xhci, "xHCI host controller is dead.\n");
1279 		}
1280 		return;
1281 	}
1282 	/* command timeout on stopped ring, ring can't be aborted */
1283 	xhci_dbg(xhci, "Command timeout on stopped ring\n");
1284 	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1285 	spin_unlock_irqrestore(&xhci->lock, flags);
1286 	return;
1287 }
1288 
1289 static void handle_cmd_completion(struct xhci_hcd *xhci,
1290 		struct xhci_event_cmd *event)
1291 {
1292 	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1293 	u64 cmd_dma;
1294 	dma_addr_t cmd_dequeue_dma;
1295 	u32 cmd_comp_code;
1296 	union xhci_trb *cmd_trb;
1297 	struct xhci_command *cmd;
1298 	u32 cmd_type;
1299 
1300 	cmd_dma = le64_to_cpu(event->cmd_trb);
1301 	cmd_trb = xhci->cmd_ring->dequeue;
1302 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1303 			cmd_trb);
1304 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
1305 	if (cmd_dequeue_dma == 0) {
1306 		xhci->error_bitmask |= 1 << 4;
1307 		return;
1308 	}
1309 	/* Does the DMA address match our internal dequeue pointer address? */
1310 	if (cmd_dma != (u64) cmd_dequeue_dma) {
1311 		xhci->error_bitmask |= 1 << 5;
1312 		return;
1313 	}
1314 
1315 	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
1316 
1317 	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1318 		xhci_err(xhci,
1319 			 "Command completion event does not match command\n");
1320 		return;
1321 	}
1322 
1323 	del_timer(&xhci->cmd_timer);
1324 
1325 	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1326 
1327 	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1328 
1329 	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
1330 	if (cmd_comp_code == COMP_CMD_STOP) {
1331 		xhci_handle_stopped_cmd_ring(xhci, cmd);
1332 		return;
1333 	}
1334 	/*
1335 	 * Host aborted the command ring, check if the current command was
1336 	 * supposed to be aborted, otherwise continue normally.
1337 	 * The command ring is stopped now, but the xHC will issue a Command
1338 	 * Ring Stopped event which will cause us to restart it.
1339 	 */
1340 	if (cmd_comp_code == COMP_CMD_ABORT) {
1341 		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1342 		if (cmd->status == COMP_CMD_ABORT)
1343 			goto event_handled;
1344 	}
1345 
1346 	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1347 	switch (cmd_type) {
1348 	case TRB_ENABLE_SLOT:
1349 		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
1350 		break;
1351 	case TRB_DISABLE_SLOT:
1352 		xhci_handle_cmd_disable_slot(xhci, slot_id);
1353 		break;
1354 	case TRB_CONFIG_EP:
1355 		if (!cmd->completion)
1356 			xhci_handle_cmd_config_ep(xhci, slot_id, event,
1357 						  cmd_comp_code);
1358 		break;
1359 	case TRB_EVAL_CONTEXT:
1360 		break;
1361 	case TRB_ADDR_DEV:
1362 		break;
1363 	case TRB_STOP_RING:
1364 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
1365 				le32_to_cpu(cmd_trb->generic.field[3])));
1366 		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
1367 		break;
1368 	case TRB_SET_DEQ:
1369 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
1370 				le32_to_cpu(cmd_trb->generic.field[3])));
1371 		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1372 		break;
1373 	case TRB_CMD_NOOP:
1374 		/* Is this an aborted command turned to NO-OP? */
1375 		if (cmd->status == COMP_CMD_STOP)
1376 			cmd_comp_code = COMP_CMD_STOP;
1377 		break;
1378 	case TRB_RESET_EP:
1379 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
1380 				le32_to_cpu(cmd_trb->generic.field[3])));
1381 		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1382 		break;
1383 	case TRB_RESET_DEV:
1384 		/* SLOT_ID field in reset device cmd completion event TRB is 0.
1385 		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1386 		 */
1387 		slot_id = TRB_TO_SLOT_ID(
1388 				le32_to_cpu(cmd_trb->generic.field[3]));
1389 		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
1390 		break;
1391 	case TRB_NEC_GET_FW:
1392 		xhci_handle_cmd_nec_get_fw(xhci, event);
1393 		break;
1394 	default:
1395 		/* Skip over unknown commands on the event ring */
1396 		xhci->error_bitmask |= 1 << 6;
1397 		break;
1398 	}
1399 
1400 	/* restart timer if this wasn't the last command */
1401 	if (cmd->cmd_list.next != &xhci->cmd_list) {
1402 		xhci->current_cmd = list_entry(cmd->cmd_list.next,
1403 					       struct xhci_command, cmd_list);
1404 		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1405 	}
1406 
1407 event_handled:
1408 	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1409 
1410 	inc_deq(xhci, xhci->cmd_ring);
1411 }
1412 
1413 static void handle_vendor_event(struct xhci_hcd *xhci,
1414 		union xhci_trb *event)
1415 {
1416 	u32 trb_type;
1417 
1418 	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1419 	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1420 	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1421 		handle_cmd_completion(xhci, &event->event_cmd);
1422 }
1423 
1424 /* @port_id: the one-based port ID from the hardware (indexed from array of all
1425  * port registers -- USB 3.0 and USB 2.0).
1426  *
1427  * Returns a zero-based port number, which is suitable for indexing into each of
1428  * the split roothubs' port arrays and bus state arrays.
1429  * Add one to it in order to call xhci_find_slot_id_by_port.
1430  */
1431 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1432 		struct xhci_hcd *xhci, u32 port_id)
1433 {
1434 	unsigned int i;
1435 	unsigned int num_similar_speed_ports = 0;
1436 
1437 	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1438 	 * and usb2_ports are 0-based indexes.  Count the number of similar
1439 	 * speed ports, up to 1 port before this port.
1440 	 */
1441 	for (i = 0; i < (port_id - 1); i++) {
1442 		u8 port_speed = xhci->port_array[i];
1443 
1444 		/*
1445 		 * Skip ports that don't have known speeds, or have duplicate
1446 		 * Extended Capabilities port speed entries.
1447 		 */
1448 		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1449 			continue;
1450 
1451 		/*
1452 		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1453 		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
1454 		 * matches the device speed, it's a similar speed port.
1455 		 */
1456 		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
1457 			num_similar_speed_ports++;
1458 	}
1459 	return num_similar_speed_ports;
1460 }
1461 
1462 static void handle_device_notification(struct xhci_hcd *xhci,
1463 		union xhci_trb *event)
1464 {
1465 	u32 slot_id;
1466 	struct usb_device *udev;
1467 
1468 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1469 	if (!xhci->devs[slot_id]) {
1470 		xhci_warn(xhci, "Device Notification event for "
1471 				"unused slot %u\n", slot_id);
1472 		return;
1473 	}
1474 
1475 	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1476 			slot_id);
1477 	udev = xhci->devs[slot_id]->udev;
1478 	if (udev && udev->parent)
1479 		usb_wakeup_notification(udev->parent, udev->portnum);
1480 }
1481 
1482 static void handle_port_status(struct xhci_hcd *xhci,
1483 		union xhci_trb *event)
1484 {
1485 	struct usb_hcd *hcd;
1486 	u32 port_id;
1487 	u32 temp, temp1;
1488 	int max_ports;
1489 	int slot_id;
1490 	unsigned int faked_port_index;
1491 	u8 major_revision;
1492 	struct xhci_bus_state *bus_state;
1493 	__le32 __iomem **port_array;
1494 	bool bogus_port_status = false;
1495 
1496 	/* Port status change events always have a successful completion code */
1497 	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1498 		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1499 		xhci->error_bitmask |= 1 << 8;
1500 	}
1501 	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1502 	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1503 
1504 	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1505 	if ((port_id <= 0) || (port_id > max_ports)) {
1506 		xhci_warn(xhci, "Invalid port id %d\n", port_id);
1507 		inc_deq(xhci, xhci->event_ring);
1508 		return;
1509 	}
1510 
1511 	/* Figure out which usb_hcd this port is attached to:
1512 	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1513 	 */
1514 	major_revision = xhci->port_array[port_id - 1];
1515 
1516 	/* Find the right roothub. */
1517 	hcd = xhci_to_hcd(xhci);
1518 	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
1519 		hcd = xhci->shared_hcd;
1520 
1521 	if (major_revision == 0) {
1522 		xhci_warn(xhci, "Event for port %u not in "
1523 				"Extended Capabilities, ignoring.\n",
1524 				port_id);
1525 		bogus_port_status = true;
1526 		goto cleanup;
1527 	}
1528 	if (major_revision == DUPLICATE_ENTRY) {
1529 		xhci_warn(xhci, "Event for port %u duplicated in"
1530 				"Extended Capabilities, ignoring.\n",
1531 				port_id);
1532 		bogus_port_status = true;
1533 		goto cleanup;
1534 	}
1535 
1536 	/*
1537 	 * Hardware port IDs reported by a Port Status Change Event include USB
1538 	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1539 	 * resume event, but we first need to translate the hardware port ID
1540 	 * into the index into the ports on the correct split roothub, and the
1541 	 * correct bus_state structure.
1542 	 */
1543 	bus_state = &xhci->bus_state[hcd_index(hcd)];
1544 	if (hcd->speed >= HCD_USB3)
1545 		port_array = xhci->usb3_ports;
1546 	else
1547 		port_array = xhci->usb2_ports;
1548 	/* Find the faked port hub number */
1549 	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1550 			port_id);
1551 
1552 	temp = readl(port_array[faked_port_index]);
1553 	if (hcd->state == HC_STATE_SUSPENDED) {
1554 		xhci_dbg(xhci, "resume root hub\n");
1555 		usb_hcd_resume_root_hub(hcd);
1556 	}
1557 
1558 	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1559 		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1560 
1561 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1562 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1563 
1564 		temp1 = readl(&xhci->op_regs->command);
1565 		if (!(temp1 & CMD_RUN)) {
1566 			xhci_warn(xhci, "xHC is not running.\n");
1567 			goto cleanup;
1568 		}
1569 
1570 		if (DEV_SUPERSPEED_ANY(temp)) {
1571 			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1572 			/* Set a flag to say the port signaled remote wakeup,
1573 			 * so we can tell the difference between the end of
1574 			 * device and host initiated resume.
1575 			 */
1576 			bus_state->port_remote_wakeup |= 1 << faked_port_index;
1577 			xhci_test_and_clear_bit(xhci, port_array,
1578 					faked_port_index, PORT_PLC);
1579 			xhci_set_link_state(xhci, port_array, faked_port_index,
1580 						XDEV_U0);
1581 			/* Need to wait until the next link state change
1582 			 * indicates the device is actually in U0.
1583 			 */
1584 			bogus_port_status = true;
1585 			goto cleanup;
1586 		} else if (!test_bit(faked_port_index,
1587 				     &bus_state->resuming_ports)) {
1588 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
1589 			bus_state->resume_done[faked_port_index] = jiffies +
1590 				msecs_to_jiffies(USB_RESUME_TIMEOUT);
1591 			set_bit(faked_port_index, &bus_state->resuming_ports);
1592 			mod_timer(&hcd->rh_timer,
1593 				  bus_state->resume_done[faked_port_index]);
1594 			/* Do the rest in GetPortStatus */
1595 		}
1596 	}
1597 
1598 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1599 			DEV_SUPERSPEED_ANY(temp)) {
1600 		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1601 		/* We've just brought the device into U0 through either the
1602 		 * Resume state after a device remote wakeup, or through the
1603 		 * U3Exit state after a host-initiated resume.  If it's a device
1604 		 * initiated remote wake, don't pass up the link state change,
1605 		 * so the roothub behavior is consistent with external
1606 		 * USB 3.0 hub behavior.
1607 		 */
1608 		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1609 				faked_port_index + 1);
1610 		if (slot_id && xhci->devs[slot_id])
1611 			xhci_ring_device(xhci, slot_id);
1612 		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1613 			bus_state->port_remote_wakeup &=
1614 				~(1 << faked_port_index);
1615 			xhci_test_and_clear_bit(xhci, port_array,
1616 					faked_port_index, PORT_PLC);
1617 			usb_wakeup_notification(hcd->self.root_hub,
1618 					faked_port_index + 1);
1619 			bogus_port_status = true;
1620 			goto cleanup;
1621 		}
1622 	}
1623 
1624 	/*
1625 	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1626 	 * RExit to a disconnect state).  If so, let the the driver know it's
1627 	 * out of the RExit state.
1628 	 */
1629 	if (!DEV_SUPERSPEED_ANY(temp) &&
1630 			test_and_clear_bit(faked_port_index,
1631 				&bus_state->rexit_ports)) {
1632 		complete(&bus_state->rexit_done[faked_port_index]);
1633 		bogus_port_status = true;
1634 		goto cleanup;
1635 	}
1636 
1637 	if (hcd->speed < HCD_USB3)
1638 		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1639 					PORT_PLC);
1640 
1641 cleanup:
1642 	/* Update event ring dequeue pointer before dropping the lock */
1643 	inc_deq(xhci, xhci->event_ring);
1644 
1645 	/* Don't make the USB core poll the roothub if we got a bad port status
1646 	 * change event.  Besides, at that point we can't tell which roothub
1647 	 * (USB 2.0 or USB 3.0) to kick.
1648 	 */
1649 	if (bogus_port_status)
1650 		return;
1651 
1652 	/*
1653 	 * xHCI port-status-change events occur when the "or" of all the
1654 	 * status-change bits in the portsc register changes from 0 to 1.
1655 	 * New status changes won't cause an event if any other change
1656 	 * bits are still set.  When an event occurs, switch over to
1657 	 * polling to avoid losing status changes.
1658 	 */
1659 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1660 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1661 	spin_unlock(&xhci->lock);
1662 	/* Pass this up to the core */
1663 	usb_hcd_poll_rh_status(hcd);
1664 	spin_lock(&xhci->lock);
1665 }
1666 
1667 /*
1668  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1669  * at end_trb, which may be in another segment.  If the suspect DMA address is a
1670  * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1671  * returns 0.
1672  */
1673 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
1674 		struct xhci_segment *start_seg,
1675 		union xhci_trb	*start_trb,
1676 		union xhci_trb	*end_trb,
1677 		dma_addr_t	suspect_dma,
1678 		bool		debug)
1679 {
1680 	dma_addr_t start_dma;
1681 	dma_addr_t end_seg_dma;
1682 	dma_addr_t end_trb_dma;
1683 	struct xhci_segment *cur_seg;
1684 
1685 	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1686 	cur_seg = start_seg;
1687 
1688 	do {
1689 		if (start_dma == 0)
1690 			return NULL;
1691 		/* We may get an event for a Link TRB in the middle of a TD */
1692 		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1693 				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1694 		/* If the end TRB isn't in this segment, this is set to 0 */
1695 		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1696 
1697 		if (debug)
1698 			xhci_warn(xhci,
1699 				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
1700 				(unsigned long long)suspect_dma,
1701 				(unsigned long long)start_dma,
1702 				(unsigned long long)end_trb_dma,
1703 				(unsigned long long)cur_seg->dma,
1704 				(unsigned long long)end_seg_dma);
1705 
1706 		if (end_trb_dma > 0) {
1707 			/* The end TRB is in this segment, so suspect should be here */
1708 			if (start_dma <= end_trb_dma) {
1709 				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1710 					return cur_seg;
1711 			} else {
1712 				/* Case for one segment with
1713 				 * a TD wrapped around to the top
1714 				 */
1715 				if ((suspect_dma >= start_dma &&
1716 							suspect_dma <= end_seg_dma) ||
1717 						(suspect_dma >= cur_seg->dma &&
1718 						 suspect_dma <= end_trb_dma))
1719 					return cur_seg;
1720 			}
1721 			return NULL;
1722 		} else {
1723 			/* Might still be somewhere in this segment */
1724 			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1725 				return cur_seg;
1726 		}
1727 		cur_seg = cur_seg->next;
1728 		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1729 	} while (cur_seg != start_seg);
1730 
1731 	return NULL;
1732 }
1733 
1734 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1735 		unsigned int slot_id, unsigned int ep_index,
1736 		unsigned int stream_id,
1737 		struct xhci_td *td, union xhci_trb *event_trb)
1738 {
1739 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1740 	struct xhci_command *command;
1741 	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1742 	if (!command)
1743 		return;
1744 
1745 	ep->ep_state |= EP_HALTED;
1746 	ep->stopped_stream = stream_id;
1747 
1748 	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
1749 	xhci_cleanup_stalled_ring(xhci, ep_index, td);
1750 
1751 	ep->stopped_stream = 0;
1752 
1753 	xhci_ring_cmd_db(xhci);
1754 }
1755 
1756 /* Check if an error has halted the endpoint ring.  The class driver will
1757  * cleanup the halt for a non-default control endpoint if we indicate a stall.
1758  * However, a babble and other errors also halt the endpoint ring, and the class
1759  * driver won't clear the halt in that case, so we need to issue a Set Transfer
1760  * Ring Dequeue Pointer command manually.
1761  */
1762 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1763 		struct xhci_ep_ctx *ep_ctx,
1764 		unsigned int trb_comp_code)
1765 {
1766 	/* TRB completion codes that may require a manual halt cleanup */
1767 	if (trb_comp_code == COMP_TX_ERR ||
1768 			trb_comp_code == COMP_BABBLE ||
1769 			trb_comp_code == COMP_SPLIT_ERR)
1770 		/* The 0.96 spec says a babbling control endpoint
1771 		 * is not halted. The 0.96 spec says it is.  Some HW
1772 		 * claims to be 0.95 compliant, but it halts the control
1773 		 * endpoint anyway.  Check if a babble halted the
1774 		 * endpoint.
1775 		 */
1776 		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1777 		    cpu_to_le32(EP_STATE_HALTED))
1778 			return 1;
1779 
1780 	return 0;
1781 }
1782 
1783 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1784 {
1785 	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1786 		/* Vendor defined "informational" completion code,
1787 		 * treat as not-an-error.
1788 		 */
1789 		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1790 				trb_comp_code);
1791 		xhci_dbg(xhci, "Treating code as success.\n");
1792 		return 1;
1793 	}
1794 	return 0;
1795 }
1796 
1797 /*
1798  * Finish the td processing, remove the td from td list;
1799  * Return 1 if the urb can be given back.
1800  */
1801 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1802 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1803 	struct xhci_virt_ep *ep, int *status, bool skip)
1804 {
1805 	struct xhci_virt_device *xdev;
1806 	struct xhci_ring *ep_ring;
1807 	unsigned int slot_id;
1808 	int ep_index;
1809 	struct urb *urb = NULL;
1810 	struct xhci_ep_ctx *ep_ctx;
1811 	int ret = 0;
1812 	struct urb_priv	*urb_priv;
1813 	u32 trb_comp_code;
1814 
1815 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1816 	xdev = xhci->devs[slot_id];
1817 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1818 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1819 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1820 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1821 
1822 	if (skip)
1823 		goto td_cleanup;
1824 
1825 	if (trb_comp_code == COMP_STOP_INVAL ||
1826 			trb_comp_code == COMP_STOP ||
1827 			trb_comp_code == COMP_STOP_SHORT) {
1828 		/* The Endpoint Stop Command completion will take care of any
1829 		 * stopped TDs.  A stopped TD may be restarted, so don't update
1830 		 * the ring dequeue pointer or take this TD off any lists yet.
1831 		 */
1832 		ep->stopped_td = td;
1833 		return 0;
1834 	}
1835 	if (trb_comp_code == COMP_STALL ||
1836 		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1837 						trb_comp_code)) {
1838 		/* Issue a reset endpoint command to clear the host side
1839 		 * halt, followed by a set dequeue command to move the
1840 		 * dequeue pointer past the TD.
1841 		 * The class driver clears the device side halt later.
1842 		 */
1843 		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
1844 					ep_ring->stream_id, td, event_trb);
1845 	} else {
1846 		/* Update ring dequeue pointer */
1847 		while (ep_ring->dequeue != td->last_trb)
1848 			inc_deq(xhci, ep_ring);
1849 		inc_deq(xhci, ep_ring);
1850 	}
1851 
1852 td_cleanup:
1853 	/* Clean up the endpoint's TD list */
1854 	urb = td->urb;
1855 	urb_priv = urb->hcpriv;
1856 
1857 	/* Do one last check of the actual transfer length.
1858 	 * If the host controller said we transferred more data than the buffer
1859 	 * length, urb->actual_length will be a very big number (since it's
1860 	 * unsigned).  Play it safe and say we didn't transfer anything.
1861 	 */
1862 	if (urb->actual_length > urb->transfer_buffer_length) {
1863 		xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
1864 			urb->transfer_buffer_length,
1865 			urb->actual_length);
1866 		urb->actual_length = 0;
1867 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1868 			*status = -EREMOTEIO;
1869 		else
1870 			*status = 0;
1871 	}
1872 	list_del_init(&td->td_list);
1873 	/* Was this TD slated to be cancelled but completed anyway? */
1874 	if (!list_empty(&td->cancelled_td_list))
1875 		list_del_init(&td->cancelled_td_list);
1876 
1877 	urb_priv->td_cnt++;
1878 	/* Giveback the urb when all the tds are completed */
1879 	if (urb_priv->td_cnt == urb_priv->length) {
1880 		ret = 1;
1881 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1882 			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1883 			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
1884 				if (xhci->quirks & XHCI_AMD_PLL_FIX)
1885 					usb_amd_quirk_pll_enable();
1886 			}
1887 		}
1888 	}
1889 
1890 	return ret;
1891 }
1892 
1893 /*
1894  * Process control tds, update urb status and actual_length.
1895  */
1896 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1897 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1898 	struct xhci_virt_ep *ep, int *status)
1899 {
1900 	struct xhci_virt_device *xdev;
1901 	struct xhci_ring *ep_ring;
1902 	unsigned int slot_id;
1903 	int ep_index;
1904 	struct xhci_ep_ctx *ep_ctx;
1905 	u32 trb_comp_code;
1906 
1907 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1908 	xdev = xhci->devs[slot_id];
1909 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1910 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1911 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1912 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1913 
1914 	switch (trb_comp_code) {
1915 	case COMP_SUCCESS:
1916 		if (event_trb == ep_ring->dequeue) {
1917 			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1918 					"without IOC set??\n");
1919 			*status = -ESHUTDOWN;
1920 		} else if (event_trb != td->last_trb) {
1921 			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1922 					"without IOC set??\n");
1923 			*status = -ESHUTDOWN;
1924 		} else {
1925 			*status = 0;
1926 		}
1927 		break;
1928 	case COMP_SHORT_TX:
1929 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1930 			*status = -EREMOTEIO;
1931 		else
1932 			*status = 0;
1933 		break;
1934 	case COMP_STOP_SHORT:
1935 		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
1936 			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
1937 		else
1938 			td->urb->actual_length =
1939 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1940 
1941 		return finish_td(xhci, td, event_trb, event, ep, status, false);
1942 	case COMP_STOP:
1943 		/* Did we stop at data stage? */
1944 		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
1945 			td->urb->actual_length =
1946 				td->urb->transfer_buffer_length -
1947 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1948 		/* fall through */
1949 	case COMP_STOP_INVAL:
1950 		return finish_td(xhci, td, event_trb, event, ep, status, false);
1951 	default:
1952 		if (!xhci_requires_manual_halt_cleanup(xhci,
1953 					ep_ctx, trb_comp_code))
1954 			break;
1955 		xhci_dbg(xhci, "TRB error code %u, "
1956 				"halted endpoint index = %u\n",
1957 				trb_comp_code, ep_index);
1958 		/* else fall through */
1959 	case COMP_STALL:
1960 		/* Did we transfer part of the data (middle) phase? */
1961 		if (event_trb != ep_ring->dequeue &&
1962 				event_trb != td->last_trb)
1963 			td->urb->actual_length =
1964 				td->urb->transfer_buffer_length -
1965 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1966 		else if (!td->urb_length_set)
1967 			td->urb->actual_length = 0;
1968 
1969 		return finish_td(xhci, td, event_trb, event, ep, status, false);
1970 	}
1971 	/*
1972 	 * Did we transfer any data, despite the errors that might have
1973 	 * happened?  I.e. did we get past the setup stage?
1974 	 */
1975 	if (event_trb != ep_ring->dequeue) {
1976 		/* The event was for the status stage */
1977 		if (event_trb == td->last_trb) {
1978 			if (td->urb_length_set) {
1979 				/* Don't overwrite a previously set error code
1980 				 */
1981 				if ((*status == -EINPROGRESS || *status == 0) &&
1982 						(td->urb->transfer_flags
1983 						 & URB_SHORT_NOT_OK))
1984 					/* Did we already see a short data
1985 					 * stage? */
1986 					*status = -EREMOTEIO;
1987 			} else {
1988 				td->urb->actual_length =
1989 					td->urb->transfer_buffer_length;
1990 			}
1991 		} else {
1992 			/*
1993 			 * Maybe the event was for the data stage? If so, update
1994 			 * already the actual_length of the URB and flag it as
1995 			 * set, so that it is not overwritten in the event for
1996 			 * the last TRB.
1997 			 */
1998 			td->urb_length_set = true;
1999 			td->urb->actual_length =
2000 				td->urb->transfer_buffer_length -
2001 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2002 			xhci_dbg(xhci, "Waiting for status "
2003 					"stage event\n");
2004 			return 0;
2005 		}
2006 	}
2007 
2008 	return finish_td(xhci, td, event_trb, event, ep, status, false);
2009 }
2010 
2011 /*
2012  * Process isochronous tds, update urb packet status and actual_length.
2013  */
2014 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2015 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
2016 	struct xhci_virt_ep *ep, int *status)
2017 {
2018 	struct xhci_ring *ep_ring;
2019 	struct urb_priv *urb_priv;
2020 	int idx;
2021 	int len = 0;
2022 	union xhci_trb *cur_trb;
2023 	struct xhci_segment *cur_seg;
2024 	struct usb_iso_packet_descriptor *frame;
2025 	u32 trb_comp_code;
2026 	bool skip_td = false;
2027 
2028 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2029 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2030 	urb_priv = td->urb->hcpriv;
2031 	idx = urb_priv->td_cnt;
2032 	frame = &td->urb->iso_frame_desc[idx];
2033 
2034 	/* handle completion code */
2035 	switch (trb_comp_code) {
2036 	case COMP_SUCCESS:
2037 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2038 			frame->status = 0;
2039 			break;
2040 		}
2041 		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2042 			trb_comp_code = COMP_SHORT_TX;
2043 	/* fallthrough */
2044 	case COMP_STOP_SHORT:
2045 	case COMP_SHORT_TX:
2046 		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2047 				-EREMOTEIO : 0;
2048 		break;
2049 	case COMP_BW_OVER:
2050 		frame->status = -ECOMM;
2051 		skip_td = true;
2052 		break;
2053 	case COMP_BUFF_OVER:
2054 	case COMP_BABBLE:
2055 		frame->status = -EOVERFLOW;
2056 		skip_td = true;
2057 		break;
2058 	case COMP_DEV_ERR:
2059 	case COMP_STALL:
2060 		frame->status = -EPROTO;
2061 		skip_td = true;
2062 		break;
2063 	case COMP_TX_ERR:
2064 		frame->status = -EPROTO;
2065 		if (event_trb != td->last_trb)
2066 			return 0;
2067 		skip_td = true;
2068 		break;
2069 	case COMP_STOP:
2070 	case COMP_STOP_INVAL:
2071 		break;
2072 	default:
2073 		frame->status = -1;
2074 		break;
2075 	}
2076 
2077 	if (trb_comp_code == COMP_SUCCESS || skip_td) {
2078 		frame->actual_length = frame->length;
2079 		td->urb->actual_length += frame->length;
2080 	} else if (trb_comp_code == COMP_STOP_SHORT) {
2081 		frame->actual_length =
2082 			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2083 		td->urb->actual_length += frame->actual_length;
2084 	} else {
2085 		for (cur_trb = ep_ring->dequeue,
2086 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2087 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2088 			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2089 			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2090 				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2091 		}
2092 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2093 			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2094 
2095 		if (trb_comp_code != COMP_STOP_INVAL) {
2096 			frame->actual_length = len;
2097 			td->urb->actual_length += len;
2098 		}
2099 	}
2100 
2101 	return finish_td(xhci, td, event_trb, event, ep, status, false);
2102 }
2103 
2104 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2105 			struct xhci_transfer_event *event,
2106 			struct xhci_virt_ep *ep, int *status)
2107 {
2108 	struct xhci_ring *ep_ring;
2109 	struct urb_priv *urb_priv;
2110 	struct usb_iso_packet_descriptor *frame;
2111 	int idx;
2112 
2113 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2114 	urb_priv = td->urb->hcpriv;
2115 	idx = urb_priv->td_cnt;
2116 	frame = &td->urb->iso_frame_desc[idx];
2117 
2118 	/* The transfer is partly done. */
2119 	frame->status = -EXDEV;
2120 
2121 	/* calc actual length */
2122 	frame->actual_length = 0;
2123 
2124 	/* Update ring dequeue pointer */
2125 	while (ep_ring->dequeue != td->last_trb)
2126 		inc_deq(xhci, ep_ring);
2127 	inc_deq(xhci, ep_ring);
2128 
2129 	return finish_td(xhci, td, NULL, event, ep, status, true);
2130 }
2131 
2132 /*
2133  * Process bulk and interrupt tds, update urb status and actual_length.
2134  */
2135 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2136 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
2137 	struct xhci_virt_ep *ep, int *status)
2138 {
2139 	struct xhci_ring *ep_ring;
2140 	union xhci_trb *cur_trb;
2141 	struct xhci_segment *cur_seg;
2142 	u32 trb_comp_code;
2143 
2144 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2145 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2146 
2147 	switch (trb_comp_code) {
2148 	case COMP_SUCCESS:
2149 		/* Double check that the HW transferred everything. */
2150 		if (event_trb != td->last_trb ||
2151 		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2152 			xhci_warn(xhci, "WARN Successful completion "
2153 					"on short TX\n");
2154 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2155 				*status = -EREMOTEIO;
2156 			else
2157 				*status = 0;
2158 			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2159 				trb_comp_code = COMP_SHORT_TX;
2160 		} else {
2161 			*status = 0;
2162 		}
2163 		break;
2164 	case COMP_STOP_SHORT:
2165 	case COMP_SHORT_TX:
2166 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2167 			*status = -EREMOTEIO;
2168 		else
2169 			*status = 0;
2170 		break;
2171 	default:
2172 		/* Others already handled above */
2173 		break;
2174 	}
2175 	if (trb_comp_code == COMP_SHORT_TX)
2176 		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2177 				"%d bytes untransferred\n",
2178 				td->urb->ep->desc.bEndpointAddress,
2179 				td->urb->transfer_buffer_length,
2180 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2181 	/* Stopped - short packet completion */
2182 	if (trb_comp_code == COMP_STOP_SHORT) {
2183 		td->urb->actual_length =
2184 			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2185 
2186 		if (td->urb->transfer_buffer_length <
2187 				td->urb->actual_length) {
2188 			xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
2189 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2190 			td->urb->actual_length = 0;
2191 			 /* status will be set by usb core for canceled urbs */
2192 		}
2193 	/* Fast path - was this the last TRB in the TD for this URB? */
2194 	} else if (event_trb == td->last_trb) {
2195 		if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
2196 			return finish_td(xhci, td, event_trb, event, ep,
2197 					 status, false);
2198 
2199 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2200 			td->urb->actual_length =
2201 				td->urb->transfer_buffer_length -
2202 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2203 			if (td->urb->transfer_buffer_length <
2204 					td->urb->actual_length) {
2205 				xhci_warn(xhci, "HC gave bad length "
2206 						"of %d bytes left\n",
2207 					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2208 				td->urb->actual_length = 0;
2209 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2210 					*status = -EREMOTEIO;
2211 				else
2212 					*status = 0;
2213 			}
2214 			/* Don't overwrite a previously set error code */
2215 			if (*status == -EINPROGRESS) {
2216 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2217 					*status = -EREMOTEIO;
2218 				else
2219 					*status = 0;
2220 			}
2221 		} else {
2222 			td->urb->actual_length =
2223 				td->urb->transfer_buffer_length;
2224 			/* Ignore a short packet completion if the
2225 			 * untransferred length was zero.
2226 			 */
2227 			if (*status == -EREMOTEIO)
2228 				*status = 0;
2229 		}
2230 	} else {
2231 		/* Slow path - walk the list, starting from the dequeue
2232 		 * pointer, to get the actual length transferred.
2233 		 */
2234 		td->urb->actual_length = 0;
2235 		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2236 				cur_trb != event_trb;
2237 				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2238 			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2239 			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2240 				td->urb->actual_length +=
2241 					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2242 		}
2243 		/* If the ring didn't stop on a Link or No-op TRB, add
2244 		 * in the actual bytes transferred from the Normal TRB
2245 		 */
2246 		if (trb_comp_code != COMP_STOP_INVAL)
2247 			td->urb->actual_length +=
2248 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2249 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2250 
2251 		if (trb_comp_code == COMP_SHORT_TX) {
2252 			xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
2253 			td->urb_length_set = true;
2254 			return 0;
2255 		}
2256 	}
2257 
2258 	return finish_td(xhci, td, event_trb, event, ep, status, false);
2259 }
2260 
2261 /*
2262  * If this function returns an error condition, it means it got a Transfer
2263  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2264  * At this point, the host controller is probably hosed and should be reset.
2265  */
2266 static int handle_tx_event(struct xhci_hcd *xhci,
2267 		struct xhci_transfer_event *event)
2268 	__releases(&xhci->lock)
2269 	__acquires(&xhci->lock)
2270 {
2271 	struct xhci_virt_device *xdev;
2272 	struct xhci_virt_ep *ep;
2273 	struct xhci_ring *ep_ring;
2274 	unsigned int slot_id;
2275 	int ep_index;
2276 	struct xhci_td *td = NULL;
2277 	dma_addr_t event_dma;
2278 	struct xhci_segment *event_seg;
2279 	union xhci_trb *event_trb;
2280 	struct urb *urb = NULL;
2281 	int status = -EINPROGRESS;
2282 	struct urb_priv *urb_priv;
2283 	struct xhci_ep_ctx *ep_ctx;
2284 	struct list_head *tmp;
2285 	u32 trb_comp_code;
2286 	int ret = 0;
2287 	int td_num = 0;
2288 	bool handling_skipped_tds = false;
2289 
2290 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2291 	xdev = xhci->devs[slot_id];
2292 	if (!xdev) {
2293 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2294 		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2295 			 (unsigned long long) xhci_trb_virt_to_dma(
2296 				 xhci->event_ring->deq_seg,
2297 				 xhci->event_ring->dequeue),
2298 			 lower_32_bits(le64_to_cpu(event->buffer)),
2299 			 upper_32_bits(le64_to_cpu(event->buffer)),
2300 			 le32_to_cpu(event->transfer_len),
2301 			 le32_to_cpu(event->flags));
2302 		xhci_dbg(xhci, "Event ring:\n");
2303 		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2304 		return -ENODEV;
2305 	}
2306 
2307 	/* Endpoint ID is 1 based, our index is zero based */
2308 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2309 	ep = &xdev->eps[ep_index];
2310 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2311 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2312 	if (!ep_ring ||
2313 	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2314 	    EP_STATE_DISABLED) {
2315 		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2316 				"or incorrect stream ring\n");
2317 		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2318 			 (unsigned long long) xhci_trb_virt_to_dma(
2319 				 xhci->event_ring->deq_seg,
2320 				 xhci->event_ring->dequeue),
2321 			 lower_32_bits(le64_to_cpu(event->buffer)),
2322 			 upper_32_bits(le64_to_cpu(event->buffer)),
2323 			 le32_to_cpu(event->transfer_len),
2324 			 le32_to_cpu(event->flags));
2325 		xhci_dbg(xhci, "Event ring:\n");
2326 		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2327 		return -ENODEV;
2328 	}
2329 
2330 	/* Count current td numbers if ep->skip is set */
2331 	if (ep->skip) {
2332 		list_for_each(tmp, &ep_ring->td_list)
2333 			td_num++;
2334 	}
2335 
2336 	event_dma = le64_to_cpu(event->buffer);
2337 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2338 	/* Look for common error cases */
2339 	switch (trb_comp_code) {
2340 	/* Skip codes that require special handling depending on
2341 	 * transfer type
2342 	 */
2343 	case COMP_SUCCESS:
2344 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2345 			break;
2346 		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2347 			trb_comp_code = COMP_SHORT_TX;
2348 		else
2349 			xhci_warn_ratelimited(xhci,
2350 					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
2351 	case COMP_SHORT_TX:
2352 		break;
2353 	case COMP_STOP:
2354 		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2355 		break;
2356 	case COMP_STOP_INVAL:
2357 		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2358 		break;
2359 	case COMP_STOP_SHORT:
2360 		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
2361 		break;
2362 	case COMP_STALL:
2363 		xhci_dbg(xhci, "Stalled endpoint\n");
2364 		ep->ep_state |= EP_HALTED;
2365 		status = -EPIPE;
2366 		break;
2367 	case COMP_TRB_ERR:
2368 		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2369 		status = -EILSEQ;
2370 		break;
2371 	case COMP_SPLIT_ERR:
2372 	case COMP_TX_ERR:
2373 		xhci_dbg(xhci, "Transfer error on endpoint\n");
2374 		status = -EPROTO;
2375 		break;
2376 	case COMP_BABBLE:
2377 		xhci_dbg(xhci, "Babble error on endpoint\n");
2378 		status = -EOVERFLOW;
2379 		break;
2380 	case COMP_DB_ERR:
2381 		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2382 		status = -ENOSR;
2383 		break;
2384 	case COMP_BW_OVER:
2385 		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2386 		break;
2387 	case COMP_BUFF_OVER:
2388 		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2389 		break;
2390 	case COMP_UNDERRUN:
2391 		/*
2392 		 * When the Isoch ring is empty, the xHC will generate
2393 		 * a Ring Overrun Event for IN Isoch endpoint or Ring
2394 		 * Underrun Event for OUT Isoch endpoint.
2395 		 */
2396 		xhci_dbg(xhci, "underrun event on endpoint\n");
2397 		if (!list_empty(&ep_ring->td_list))
2398 			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2399 					"still with TDs queued?\n",
2400 				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2401 				 ep_index);
2402 		goto cleanup;
2403 	case COMP_OVERRUN:
2404 		xhci_dbg(xhci, "overrun event on endpoint\n");
2405 		if (!list_empty(&ep_ring->td_list))
2406 			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2407 					"still with TDs queued?\n",
2408 				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2409 				 ep_index);
2410 		goto cleanup;
2411 	case COMP_DEV_ERR:
2412 		xhci_warn(xhci, "WARN: detect an incompatible device");
2413 		status = -EPROTO;
2414 		break;
2415 	case COMP_MISSED_INT:
2416 		/*
2417 		 * When encounter missed service error, one or more isoc tds
2418 		 * may be missed by xHC.
2419 		 * Set skip flag of the ep_ring; Complete the missed tds as
2420 		 * short transfer when process the ep_ring next time.
2421 		 */
2422 		ep->skip = true;
2423 		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2424 		goto cleanup;
2425 	case COMP_PING_ERR:
2426 		ep->skip = true;
2427 		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2428 		goto cleanup;
2429 	default:
2430 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2431 			status = 0;
2432 			break;
2433 		}
2434 		xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2435 			  trb_comp_code);
2436 		goto cleanup;
2437 	}
2438 
2439 	do {
2440 		/* This TRB should be in the TD at the head of this ring's
2441 		 * TD list.
2442 		 */
2443 		if (list_empty(&ep_ring->td_list)) {
2444 			/*
2445 			 * A stopped endpoint may generate an extra completion
2446 			 * event if the device was suspended.  Don't print
2447 			 * warnings.
2448 			 */
2449 			if (!(trb_comp_code == COMP_STOP ||
2450 						trb_comp_code == COMP_STOP_INVAL)) {
2451 				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2452 						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2453 						ep_index);
2454 				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2455 						(le32_to_cpu(event->flags) &
2456 						 TRB_TYPE_BITMASK)>>10);
2457 				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2458 			}
2459 			if (ep->skip) {
2460 				ep->skip = false;
2461 				xhci_dbg(xhci, "td_list is empty while skip "
2462 						"flag set. Clear skip flag.\n");
2463 			}
2464 			ret = 0;
2465 			goto cleanup;
2466 		}
2467 
2468 		/* We've skipped all the TDs on the ep ring when ep->skip set */
2469 		if (ep->skip && td_num == 0) {
2470 			ep->skip = false;
2471 			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2472 						"Clear skip flag.\n");
2473 			ret = 0;
2474 			goto cleanup;
2475 		}
2476 
2477 		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2478 		if (ep->skip)
2479 			td_num--;
2480 
2481 		/* Is this a TRB in the currently executing TD? */
2482 		event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2483 				td->last_trb, event_dma, false);
2484 
2485 		/*
2486 		 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2487 		 * is not in the current TD pointed by ep_ring->dequeue because
2488 		 * that the hardware dequeue pointer still at the previous TRB
2489 		 * of the current TD. The previous TRB maybe a Link TD or the
2490 		 * last TRB of the previous TD. The command completion handle
2491 		 * will take care the rest.
2492 		 */
2493 		if (!event_seg && (trb_comp_code == COMP_STOP ||
2494 				   trb_comp_code == COMP_STOP_INVAL)) {
2495 			ret = 0;
2496 			goto cleanup;
2497 		}
2498 
2499 		if (!event_seg) {
2500 			if (!ep->skip ||
2501 			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2502 				/* Some host controllers give a spurious
2503 				 * successful event after a short transfer.
2504 				 * Ignore it.
2505 				 */
2506 				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2507 						ep_ring->last_td_was_short) {
2508 					ep_ring->last_td_was_short = false;
2509 					ret = 0;
2510 					goto cleanup;
2511 				}
2512 				/* HC is busted, give up! */
2513 				xhci_err(xhci,
2514 					"ERROR Transfer event TRB DMA ptr not "
2515 					"part of current TD ep_index %d "
2516 					"comp_code %u\n", ep_index,
2517 					trb_comp_code);
2518 				trb_in_td(xhci, ep_ring->deq_seg,
2519 					  ep_ring->dequeue, td->last_trb,
2520 					  event_dma, true);
2521 				return -ESHUTDOWN;
2522 			}
2523 
2524 			ret = skip_isoc_td(xhci, td, event, ep, &status);
2525 			goto cleanup;
2526 		}
2527 		if (trb_comp_code == COMP_SHORT_TX)
2528 			ep_ring->last_td_was_short = true;
2529 		else
2530 			ep_ring->last_td_was_short = false;
2531 
2532 		if (ep->skip) {
2533 			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2534 			ep->skip = false;
2535 		}
2536 
2537 		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2538 						sizeof(*event_trb)];
2539 		/*
2540 		 * No-op TRB should not trigger interrupts.
2541 		 * If event_trb is a no-op TRB, it means the
2542 		 * corresponding TD has been cancelled. Just ignore
2543 		 * the TD.
2544 		 */
2545 		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2546 			xhci_dbg(xhci,
2547 				 "event_trb is a no-op TRB. Skip it\n");
2548 			goto cleanup;
2549 		}
2550 
2551 		/* Now update the urb's actual_length and give back to
2552 		 * the core
2553 		 */
2554 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2555 			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2556 						 &status);
2557 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2558 			ret = process_isoc_td(xhci, td, event_trb, event, ep,
2559 						 &status);
2560 		else
2561 			ret = process_bulk_intr_td(xhci, td, event_trb, event,
2562 						 ep, &status);
2563 
2564 cleanup:
2565 
2566 
2567 		handling_skipped_tds = ep->skip &&
2568 			trb_comp_code != COMP_MISSED_INT &&
2569 			trb_comp_code != COMP_PING_ERR;
2570 
2571 		/*
2572 		 * Do not update event ring dequeue pointer if we're in a loop
2573 		 * processing missed tds.
2574 		 */
2575 		if (!handling_skipped_tds)
2576 			inc_deq(xhci, xhci->event_ring);
2577 
2578 		if (ret) {
2579 			urb = td->urb;
2580 			urb_priv = urb->hcpriv;
2581 
2582 			xhci_urb_free_priv(urb_priv);
2583 
2584 			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2585 			if ((urb->actual_length != urb->transfer_buffer_length &&
2586 						(urb->transfer_flags &
2587 						 URB_SHORT_NOT_OK)) ||
2588 					(status != 0 &&
2589 					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2590 				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2591 						"expected = %d, status = %d\n",
2592 						urb, urb->actual_length,
2593 						urb->transfer_buffer_length,
2594 						status);
2595 			spin_unlock(&xhci->lock);
2596 			/* EHCI, UHCI, and OHCI always unconditionally set the
2597 			 * urb->status of an isochronous endpoint to 0.
2598 			 */
2599 			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2600 				status = 0;
2601 			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2602 			spin_lock(&xhci->lock);
2603 		}
2604 
2605 	/*
2606 	 * If ep->skip is set, it means there are missed tds on the
2607 	 * endpoint ring need to take care of.
2608 	 * Process them as short transfer until reach the td pointed by
2609 	 * the event.
2610 	 */
2611 	} while (handling_skipped_tds);
2612 
2613 	return 0;
2614 }
2615 
2616 /*
2617  * This function handles all OS-owned events on the event ring.  It may drop
2618  * xhci->lock between event processing (e.g. to pass up port status changes).
2619  * Returns >0 for "possibly more events to process" (caller should call again),
2620  * otherwise 0 if done.  In future, <0 returns should indicate error code.
2621  */
2622 static int xhci_handle_event(struct xhci_hcd *xhci)
2623 {
2624 	union xhci_trb *event;
2625 	int update_ptrs = 1;
2626 	int ret;
2627 
2628 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2629 		xhci->error_bitmask |= 1 << 1;
2630 		return 0;
2631 	}
2632 
2633 	event = xhci->event_ring->dequeue;
2634 	/* Does the HC or OS own the TRB? */
2635 	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2636 	    xhci->event_ring->cycle_state) {
2637 		xhci->error_bitmask |= 1 << 2;
2638 		return 0;
2639 	}
2640 
2641 	/*
2642 	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2643 	 * speculative reads of the event's flags/data below.
2644 	 */
2645 	rmb();
2646 	/* FIXME: Handle more event types. */
2647 	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2648 	case TRB_TYPE(TRB_COMPLETION):
2649 		handle_cmd_completion(xhci, &event->event_cmd);
2650 		break;
2651 	case TRB_TYPE(TRB_PORT_STATUS):
2652 		handle_port_status(xhci, event);
2653 		update_ptrs = 0;
2654 		break;
2655 	case TRB_TYPE(TRB_TRANSFER):
2656 		ret = handle_tx_event(xhci, &event->trans_event);
2657 		if (ret < 0)
2658 			xhci->error_bitmask |= 1 << 9;
2659 		else
2660 			update_ptrs = 0;
2661 		break;
2662 	case TRB_TYPE(TRB_DEV_NOTE):
2663 		handle_device_notification(xhci, event);
2664 		break;
2665 	default:
2666 		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2667 		    TRB_TYPE(48))
2668 			handle_vendor_event(xhci, event);
2669 		else
2670 			xhci->error_bitmask |= 1 << 3;
2671 	}
2672 	/* Any of the above functions may drop and re-acquire the lock, so check
2673 	 * to make sure a watchdog timer didn't mark the host as non-responsive.
2674 	 */
2675 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2676 		xhci_dbg(xhci, "xHCI host dying, returning from "
2677 				"event handler.\n");
2678 		return 0;
2679 	}
2680 
2681 	if (update_ptrs)
2682 		/* Update SW event ring dequeue pointer */
2683 		inc_deq(xhci, xhci->event_ring);
2684 
2685 	/* Are there more items on the event ring?  Caller will call us again to
2686 	 * check.
2687 	 */
2688 	return 1;
2689 }
2690 
2691 /*
2692  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2693  * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2694  * indicators of an event TRB error, but we check the status *first* to be safe.
2695  */
2696 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2697 {
2698 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2699 	u32 status;
2700 	u64 temp_64;
2701 	union xhci_trb *event_ring_deq;
2702 	dma_addr_t deq;
2703 
2704 	spin_lock(&xhci->lock);
2705 	/* Check if the xHC generated the interrupt, or the irq is shared */
2706 	status = readl(&xhci->op_regs->status);
2707 	if (status == 0xffffffff)
2708 		goto hw_died;
2709 
2710 	if (!(status & STS_EINT)) {
2711 		spin_unlock(&xhci->lock);
2712 		return IRQ_NONE;
2713 	}
2714 	if (status & STS_FATAL) {
2715 		xhci_warn(xhci, "WARNING: Host System Error\n");
2716 		xhci_halt(xhci);
2717 hw_died:
2718 		spin_unlock(&xhci->lock);
2719 		return IRQ_HANDLED;
2720 	}
2721 
2722 	/*
2723 	 * Clear the op reg interrupt status first,
2724 	 * so we can receive interrupts from other MSI-X interrupters.
2725 	 * Write 1 to clear the interrupt status.
2726 	 */
2727 	status |= STS_EINT;
2728 	writel(status, &xhci->op_regs->status);
2729 	/* FIXME when MSI-X is supported and there are multiple vectors */
2730 	/* Clear the MSI-X event interrupt status */
2731 
2732 	if (hcd->irq) {
2733 		u32 irq_pending;
2734 		/* Acknowledge the PCI interrupt */
2735 		irq_pending = readl(&xhci->ir_set->irq_pending);
2736 		irq_pending |= IMAN_IP;
2737 		writel(irq_pending, &xhci->ir_set->irq_pending);
2738 	}
2739 
2740 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2741 		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2742 				"Shouldn't IRQs be disabled?\n");
2743 		/* Clear the event handler busy flag (RW1C);
2744 		 * the event ring should be empty.
2745 		 */
2746 		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2747 		xhci_write_64(xhci, temp_64 | ERST_EHB,
2748 				&xhci->ir_set->erst_dequeue);
2749 		spin_unlock(&xhci->lock);
2750 
2751 		return IRQ_HANDLED;
2752 	}
2753 
2754 	event_ring_deq = xhci->event_ring->dequeue;
2755 	/* FIXME this should be a delayed service routine
2756 	 * that clears the EHB.
2757 	 */
2758 	while (xhci_handle_event(xhci) > 0) {}
2759 
2760 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2761 	/* If necessary, update the HW's version of the event ring deq ptr. */
2762 	if (event_ring_deq != xhci->event_ring->dequeue) {
2763 		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2764 				xhci->event_ring->dequeue);
2765 		if (deq == 0)
2766 			xhci_warn(xhci, "WARN something wrong with SW event "
2767 					"ring dequeue ptr.\n");
2768 		/* Update HC event ring dequeue pointer */
2769 		temp_64 &= ERST_PTR_MASK;
2770 		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2771 	}
2772 
2773 	/* Clear the event handler busy flag (RW1C); event ring is empty. */
2774 	temp_64 |= ERST_EHB;
2775 	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2776 
2777 	spin_unlock(&xhci->lock);
2778 
2779 	return IRQ_HANDLED;
2780 }
2781 
2782 irqreturn_t xhci_msi_irq(int irq, void *hcd)
2783 {
2784 	return xhci_irq(hcd);
2785 }
2786 
2787 /****		Endpoint Ring Operations	****/
2788 
2789 /*
2790  * Generic function for queueing a TRB on a ring.
2791  * The caller must have checked to make sure there's room on the ring.
2792  *
2793  * @more_trbs_coming:	Will you enqueue more TRBs before calling
2794  *			prepare_transfer()?
2795  */
2796 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2797 		bool more_trbs_coming,
2798 		u32 field1, u32 field2, u32 field3, u32 field4)
2799 {
2800 	struct xhci_generic_trb *trb;
2801 
2802 	trb = &ring->enqueue->generic;
2803 	trb->field[0] = cpu_to_le32(field1);
2804 	trb->field[1] = cpu_to_le32(field2);
2805 	trb->field[2] = cpu_to_le32(field3);
2806 	trb->field[3] = cpu_to_le32(field4);
2807 	inc_enq(xhci, ring, more_trbs_coming);
2808 }
2809 
2810 /*
2811  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2812  * FIXME allocate segments if the ring is full.
2813  */
2814 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2815 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2816 {
2817 	unsigned int num_trbs_needed;
2818 
2819 	/* Make sure the endpoint has been added to xHC schedule */
2820 	switch (ep_state) {
2821 	case EP_STATE_DISABLED:
2822 		/*
2823 		 * USB core changed config/interfaces without notifying us,
2824 		 * or hardware is reporting the wrong state.
2825 		 */
2826 		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2827 		return -ENOENT;
2828 	case EP_STATE_ERROR:
2829 		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2830 		/* FIXME event handling code for error needs to clear it */
2831 		/* XXX not sure if this should be -ENOENT or not */
2832 		return -EINVAL;
2833 	case EP_STATE_HALTED:
2834 		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2835 	case EP_STATE_STOPPED:
2836 	case EP_STATE_RUNNING:
2837 		break;
2838 	default:
2839 		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2840 		/*
2841 		 * FIXME issue Configure Endpoint command to try to get the HC
2842 		 * back into a known state.
2843 		 */
2844 		return -EINVAL;
2845 	}
2846 
2847 	while (1) {
2848 		if (room_on_ring(xhci, ep_ring, num_trbs))
2849 			break;
2850 
2851 		if (ep_ring == xhci->cmd_ring) {
2852 			xhci_err(xhci, "Do not support expand command ring\n");
2853 			return -ENOMEM;
2854 		}
2855 
2856 		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2857 				"ERROR no room on ep ring, try ring expansion");
2858 		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2859 		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2860 					mem_flags)) {
2861 			xhci_err(xhci, "Ring expansion failed\n");
2862 			return -ENOMEM;
2863 		}
2864 	}
2865 
2866 	if (enqueue_is_link_trb(ep_ring)) {
2867 		struct xhci_ring *ring = ep_ring;
2868 		union xhci_trb *next;
2869 
2870 		next = ring->enqueue;
2871 
2872 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
2873 			/* If we're not dealing with 0.95 hardware or isoc rings
2874 			 * on AMD 0.96 host, clear the chain bit.
2875 			 */
2876 			if (!xhci_link_trb_quirk(xhci) &&
2877 					!(ring->type == TYPE_ISOC &&
2878 					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2879 				next->link.control &= cpu_to_le32(~TRB_CHAIN);
2880 			else
2881 				next->link.control |= cpu_to_le32(TRB_CHAIN);
2882 
2883 			wmb();
2884 			next->link.control ^= cpu_to_le32(TRB_CYCLE);
2885 
2886 			/* Toggle the cycle bit after the last ring segment. */
2887 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2888 				ring->cycle_state ^= 1;
2889 			}
2890 			ring->enq_seg = ring->enq_seg->next;
2891 			ring->enqueue = ring->enq_seg->trbs;
2892 			next = ring->enqueue;
2893 		}
2894 	}
2895 
2896 	return 0;
2897 }
2898 
2899 static int prepare_transfer(struct xhci_hcd *xhci,
2900 		struct xhci_virt_device *xdev,
2901 		unsigned int ep_index,
2902 		unsigned int stream_id,
2903 		unsigned int num_trbs,
2904 		struct urb *urb,
2905 		unsigned int td_index,
2906 		gfp_t mem_flags)
2907 {
2908 	int ret;
2909 	struct urb_priv *urb_priv;
2910 	struct xhci_td	*td;
2911 	struct xhci_ring *ep_ring;
2912 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2913 
2914 	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2915 	if (!ep_ring) {
2916 		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2917 				stream_id);
2918 		return -EINVAL;
2919 	}
2920 
2921 	ret = prepare_ring(xhci, ep_ring,
2922 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2923 			   num_trbs, mem_flags);
2924 	if (ret)
2925 		return ret;
2926 
2927 	urb_priv = urb->hcpriv;
2928 	td = urb_priv->td[td_index];
2929 
2930 	INIT_LIST_HEAD(&td->td_list);
2931 	INIT_LIST_HEAD(&td->cancelled_td_list);
2932 
2933 	if (td_index == 0) {
2934 		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2935 		if (unlikely(ret))
2936 			return ret;
2937 	}
2938 
2939 	td->urb = urb;
2940 	/* Add this TD to the tail of the endpoint ring's TD list */
2941 	list_add_tail(&td->td_list, &ep_ring->td_list);
2942 	td->start_seg = ep_ring->enq_seg;
2943 	td->first_trb = ep_ring->enqueue;
2944 
2945 	urb_priv->td[td_index] = td;
2946 
2947 	return 0;
2948 }
2949 
2950 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2951 {
2952 	int num_sgs, num_trbs, running_total, temp, i;
2953 	struct scatterlist *sg;
2954 
2955 	sg = NULL;
2956 	num_sgs = urb->num_mapped_sgs;
2957 	temp = urb->transfer_buffer_length;
2958 
2959 	num_trbs = 0;
2960 	for_each_sg(urb->sg, sg, num_sgs, i) {
2961 		unsigned int len = sg_dma_len(sg);
2962 
2963 		/* Scatter gather list entries may cross 64KB boundaries */
2964 		running_total = TRB_MAX_BUFF_SIZE -
2965 			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2966 		running_total &= TRB_MAX_BUFF_SIZE - 1;
2967 		if (running_total != 0)
2968 			num_trbs++;
2969 
2970 		/* How many more 64KB chunks to transfer, how many more TRBs? */
2971 		while (running_total < sg_dma_len(sg) && running_total < temp) {
2972 			num_trbs++;
2973 			running_total += TRB_MAX_BUFF_SIZE;
2974 		}
2975 		len = min_t(int, len, temp);
2976 		temp -= len;
2977 		if (temp == 0)
2978 			break;
2979 	}
2980 	return num_trbs;
2981 }
2982 
2983 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2984 {
2985 	if (num_trbs != 0)
2986 		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2987 				"TRBs, %d left\n", __func__,
2988 				urb->ep->desc.bEndpointAddress, num_trbs);
2989 	if (running_total != urb->transfer_buffer_length)
2990 		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2991 				"queued %#x (%d), asked for %#x (%d)\n",
2992 				__func__,
2993 				urb->ep->desc.bEndpointAddress,
2994 				running_total, running_total,
2995 				urb->transfer_buffer_length,
2996 				urb->transfer_buffer_length);
2997 }
2998 
2999 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3000 		unsigned int ep_index, unsigned int stream_id, int start_cycle,
3001 		struct xhci_generic_trb *start_trb)
3002 {
3003 	/*
3004 	 * Pass all the TRBs to the hardware at once and make sure this write
3005 	 * isn't reordered.
3006 	 */
3007 	wmb();
3008 	if (start_cycle)
3009 		start_trb->field[3] |= cpu_to_le32(start_cycle);
3010 	else
3011 		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3012 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3013 }
3014 
3015 /*
3016  * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
3017  * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
3018  * (comprised of sg list entries) can take several service intervals to
3019  * transmit.
3020  */
3021 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3022 		struct urb *urb, int slot_id, unsigned int ep_index)
3023 {
3024 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
3025 			xhci->devs[slot_id]->out_ctx, ep_index);
3026 	int xhci_interval;
3027 	int ep_interval;
3028 
3029 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3030 	ep_interval = urb->interval;
3031 	/* Convert to microframes */
3032 	if (urb->dev->speed == USB_SPEED_LOW ||
3033 			urb->dev->speed == USB_SPEED_FULL)
3034 		ep_interval *= 8;
3035 	/* FIXME change this to a warning and a suggestion to use the new API
3036 	 * to set the polling interval (once the API is added).
3037 	 */
3038 	if (xhci_interval != ep_interval) {
3039 		dev_dbg_ratelimited(&urb->dev->dev,
3040 				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3041 				ep_interval, ep_interval == 1 ? "" : "s",
3042 				xhci_interval, xhci_interval == 1 ? "" : "s");
3043 		urb->interval = xhci_interval;
3044 		/* Convert back to frames for LS/FS devices */
3045 		if (urb->dev->speed == USB_SPEED_LOW ||
3046 				urb->dev->speed == USB_SPEED_FULL)
3047 			urb->interval /= 8;
3048 	}
3049 	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3050 }
3051 
3052 /*
3053  * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3054  * packets remaining in the TD (*not* including this TRB).
3055  *
3056  * Total TD packet count = total_packet_count =
3057  *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
3058  *
3059  * Packets transferred up to and including this TRB = packets_transferred =
3060  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3061  *
3062  * TD size = total_packet_count - packets_transferred
3063  *
3064  * For xHCI 0.96 and older, TD size field should be the remaining bytes
3065  * including this TRB, right shifted by 10
3066  *
3067  * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3068  * This is taken care of in the TRB_TD_SIZE() macro
3069  *
3070  * The last TRB in a TD must have the TD size set to zero.
3071  */
3072 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3073 			      int trb_buff_len, unsigned int td_total_len,
3074 			      struct urb *urb, unsigned int num_trbs_left)
3075 {
3076 	u32 maxp, total_packet_count;
3077 
3078 	if (xhci->hci_version < 0x100)
3079 		return ((td_total_len - transferred) >> 10);
3080 
3081 	maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3082 	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3083 
3084 	/* One TRB with a zero-length data packet. */
3085 	if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
3086 	    trb_buff_len == td_total_len)
3087 		return 0;
3088 
3089 	/* Queueing functions don't count the current TRB into transferred */
3090 	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3091 }
3092 
3093 
3094 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3095 		struct urb *urb, int slot_id, unsigned int ep_index)
3096 {
3097 	struct xhci_ring *ep_ring;
3098 	unsigned int num_trbs;
3099 	struct urb_priv *urb_priv;
3100 	struct xhci_td *td;
3101 	struct scatterlist *sg;
3102 	int num_sgs;
3103 	int trb_buff_len, this_sg_len, running_total, ret;
3104 	unsigned int total_packet_count;
3105 	bool zero_length_needed;
3106 	bool first_trb;
3107 	int last_trb_num;
3108 	u64 addr;
3109 	bool more_trbs_coming;
3110 
3111 	struct xhci_generic_trb *start_trb;
3112 	int start_cycle;
3113 
3114 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3115 	if (!ep_ring)
3116 		return -EINVAL;
3117 
3118 	num_trbs = count_sg_trbs_needed(xhci, urb);
3119 	num_sgs = urb->num_mapped_sgs;
3120 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3121 			usb_endpoint_maxp(&urb->ep->desc));
3122 
3123 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
3124 			ep_index, urb->stream_id,
3125 			num_trbs, urb, 0, mem_flags);
3126 	if (ret < 0)
3127 		return ret;
3128 
3129 	urb_priv = urb->hcpriv;
3130 
3131 	/* Deal with URB_ZERO_PACKET - need one more td/trb */
3132 	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
3133 		urb_priv->length == 2;
3134 	if (zero_length_needed) {
3135 		num_trbs++;
3136 		xhci_dbg(xhci, "Creating zero length td.\n");
3137 		ret = prepare_transfer(xhci, xhci->devs[slot_id],
3138 				ep_index, urb->stream_id,
3139 				1, urb, 1, mem_flags);
3140 		if (ret < 0)
3141 			return ret;
3142 	}
3143 
3144 	td = urb_priv->td[0];
3145 
3146 	/*
3147 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3148 	 * until we've finished creating all the other TRBs.  The ring's cycle
3149 	 * state may change as we enqueue the other TRBs, so save it too.
3150 	 */
3151 	start_trb = &ep_ring->enqueue->generic;
3152 	start_cycle = ep_ring->cycle_state;
3153 
3154 	running_total = 0;
3155 	/*
3156 	 * How much data is in the first TRB?
3157 	 *
3158 	 * There are three forces at work for TRB buffer pointers and lengths:
3159 	 * 1. We don't want to walk off the end of this sg-list entry buffer.
3160 	 * 2. The transfer length that the driver requested may be smaller than
3161 	 *    the amount of memory allocated for this scatter-gather list.
3162 	 * 3. TRBs buffers can't cross 64KB boundaries.
3163 	 */
3164 	sg = urb->sg;
3165 	addr = (u64) sg_dma_address(sg);
3166 	this_sg_len = sg_dma_len(sg);
3167 	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3168 	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3169 	if (trb_buff_len > urb->transfer_buffer_length)
3170 		trb_buff_len = urb->transfer_buffer_length;
3171 
3172 	first_trb = true;
3173 	last_trb_num = zero_length_needed ? 2 : 1;
3174 	/* Queue the first TRB, even if it's zero-length */
3175 	do {
3176 		u32 field = 0;
3177 		u32 length_field = 0;
3178 		u32 remainder = 0;
3179 
3180 		/* Don't change the cycle bit of the first TRB until later */
3181 		if (first_trb) {
3182 			first_trb = false;
3183 			if (start_cycle == 0)
3184 				field |= 0x1;
3185 		} else
3186 			field |= ep_ring->cycle_state;
3187 
3188 		/* Chain all the TRBs together; clear the chain bit in the last
3189 		 * TRB to indicate it's the last TRB in the chain.
3190 		 */
3191 		if (num_trbs > last_trb_num) {
3192 			field |= TRB_CHAIN;
3193 		} else if (num_trbs == last_trb_num) {
3194 			td->last_trb = ep_ring->enqueue;
3195 			field |= TRB_IOC;
3196 		} else if (zero_length_needed && num_trbs == 1) {
3197 			trb_buff_len = 0;
3198 			urb_priv->td[1]->last_trb = ep_ring->enqueue;
3199 			field |= TRB_IOC;
3200 		}
3201 
3202 		/* Only set interrupt on short packet for IN endpoints */
3203 		if (usb_urb_dir_in(urb))
3204 			field |= TRB_ISP;
3205 
3206 		if (TRB_MAX_BUFF_SIZE -
3207 				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3208 			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3209 			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3210 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3211 					(unsigned int) addr + trb_buff_len);
3212 		}
3213 
3214 		/* Set the TRB length, TD size, and interrupter fields. */
3215 		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
3216 					   urb->transfer_buffer_length,
3217 					   urb, num_trbs - 1);
3218 
3219 		length_field = TRB_LEN(trb_buff_len) |
3220 			TRB_TD_SIZE(remainder) |
3221 			TRB_INTR_TARGET(0);
3222 
3223 		if (num_trbs > 1)
3224 			more_trbs_coming = true;
3225 		else
3226 			more_trbs_coming = false;
3227 		queue_trb(xhci, ep_ring, more_trbs_coming,
3228 				lower_32_bits(addr),
3229 				upper_32_bits(addr),
3230 				length_field,
3231 				field | TRB_TYPE(TRB_NORMAL));
3232 		--num_trbs;
3233 		running_total += trb_buff_len;
3234 
3235 		/* Calculate length for next transfer --
3236 		 * Are we done queueing all the TRBs for this sg entry?
3237 		 */
3238 		this_sg_len -= trb_buff_len;
3239 		if (this_sg_len == 0) {
3240 			--num_sgs;
3241 			if (num_sgs == 0)
3242 				break;
3243 			sg = sg_next(sg);
3244 			addr = (u64) sg_dma_address(sg);
3245 			this_sg_len = sg_dma_len(sg);
3246 		} else {
3247 			addr += trb_buff_len;
3248 		}
3249 
3250 		trb_buff_len = TRB_MAX_BUFF_SIZE -
3251 			(addr & (TRB_MAX_BUFF_SIZE - 1));
3252 		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3253 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
3254 			trb_buff_len =
3255 				urb->transfer_buffer_length - running_total;
3256 	} while (num_trbs > 0);
3257 
3258 	check_trb_math(urb, num_trbs, running_total);
3259 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3260 			start_cycle, start_trb);
3261 	return 0;
3262 }
3263 
3264 /* This is very similar to what ehci-q.c qtd_fill() does */
3265 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3266 		struct urb *urb, int slot_id, unsigned int ep_index)
3267 {
3268 	struct xhci_ring *ep_ring;
3269 	struct urb_priv *urb_priv;
3270 	struct xhci_td *td;
3271 	int num_trbs;
3272 	struct xhci_generic_trb *start_trb;
3273 	bool first_trb;
3274 	int last_trb_num;
3275 	bool more_trbs_coming;
3276 	bool zero_length_needed;
3277 	int start_cycle;
3278 	u32 field, length_field;
3279 
3280 	int running_total, trb_buff_len, ret;
3281 	unsigned int total_packet_count;
3282 	u64 addr;
3283 
3284 	if (urb->num_sgs)
3285 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3286 
3287 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3288 	if (!ep_ring)
3289 		return -EINVAL;
3290 
3291 	num_trbs = 0;
3292 	/* How much data is (potentially) left before the 64KB boundary? */
3293 	running_total = TRB_MAX_BUFF_SIZE -
3294 		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3295 	running_total &= TRB_MAX_BUFF_SIZE - 1;
3296 
3297 	/* If there's some data on this 64KB chunk, or we have to send a
3298 	 * zero-length transfer, we need at least one TRB
3299 	 */
3300 	if (running_total != 0 || urb->transfer_buffer_length == 0)
3301 		num_trbs++;
3302 	/* How many more 64KB chunks to transfer, how many more TRBs? */
3303 	while (running_total < urb->transfer_buffer_length) {
3304 		num_trbs++;
3305 		running_total += TRB_MAX_BUFF_SIZE;
3306 	}
3307 
3308 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
3309 			ep_index, urb->stream_id,
3310 			num_trbs, urb, 0, mem_flags);
3311 	if (ret < 0)
3312 		return ret;
3313 
3314 	urb_priv = urb->hcpriv;
3315 
3316 	/* Deal with URB_ZERO_PACKET - need one more td/trb */
3317 	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
3318 		urb_priv->length == 2;
3319 	if (zero_length_needed) {
3320 		num_trbs++;
3321 		xhci_dbg(xhci, "Creating zero length td.\n");
3322 		ret = prepare_transfer(xhci, xhci->devs[slot_id],
3323 				ep_index, urb->stream_id,
3324 				1, urb, 1, mem_flags);
3325 		if (ret < 0)
3326 			return ret;
3327 	}
3328 
3329 	td = urb_priv->td[0];
3330 
3331 	/*
3332 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3333 	 * until we've finished creating all the other TRBs.  The ring's cycle
3334 	 * state may change as we enqueue the other TRBs, so save it too.
3335 	 */
3336 	start_trb = &ep_ring->enqueue->generic;
3337 	start_cycle = ep_ring->cycle_state;
3338 
3339 	running_total = 0;
3340 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3341 			usb_endpoint_maxp(&urb->ep->desc));
3342 	/* How much data is in the first TRB? */
3343 	addr = (u64) urb->transfer_dma;
3344 	trb_buff_len = TRB_MAX_BUFF_SIZE -
3345 		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3346 	if (trb_buff_len > urb->transfer_buffer_length)
3347 		trb_buff_len = urb->transfer_buffer_length;
3348 
3349 	first_trb = true;
3350 	last_trb_num = zero_length_needed ? 2 : 1;
3351 	/* Queue the first TRB, even if it's zero-length */
3352 	do {
3353 		u32 remainder = 0;
3354 		field = 0;
3355 
3356 		/* Don't change the cycle bit of the first TRB until later */
3357 		if (first_trb) {
3358 			first_trb = false;
3359 			if (start_cycle == 0)
3360 				field |= 0x1;
3361 		} else
3362 			field |= ep_ring->cycle_state;
3363 
3364 		/* Chain all the TRBs together; clear the chain bit in the last
3365 		 * TRB to indicate it's the last TRB in the chain.
3366 		 */
3367 		if (num_trbs > last_trb_num) {
3368 			field |= TRB_CHAIN;
3369 		} else if (num_trbs == last_trb_num) {
3370 			td->last_trb = ep_ring->enqueue;
3371 			field |= TRB_IOC;
3372 		} else if (zero_length_needed && num_trbs == 1) {
3373 			trb_buff_len = 0;
3374 			urb_priv->td[1]->last_trb = ep_ring->enqueue;
3375 			field |= TRB_IOC;
3376 		}
3377 
3378 		/* Only set interrupt on short packet for IN endpoints */
3379 		if (usb_urb_dir_in(urb))
3380 			field |= TRB_ISP;
3381 
3382 		/* Set the TRB length, TD size, and interrupter fields. */
3383 		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
3384 					   urb->transfer_buffer_length,
3385 					   urb, num_trbs - 1);
3386 
3387 		length_field = TRB_LEN(trb_buff_len) |
3388 			TRB_TD_SIZE(remainder) |
3389 			TRB_INTR_TARGET(0);
3390 
3391 		if (num_trbs > 1)
3392 			more_trbs_coming = true;
3393 		else
3394 			more_trbs_coming = false;
3395 		queue_trb(xhci, ep_ring, more_trbs_coming,
3396 				lower_32_bits(addr),
3397 				upper_32_bits(addr),
3398 				length_field,
3399 				field | TRB_TYPE(TRB_NORMAL));
3400 		--num_trbs;
3401 		running_total += trb_buff_len;
3402 
3403 		/* Calculate length for next transfer */
3404 		addr += trb_buff_len;
3405 		trb_buff_len = urb->transfer_buffer_length - running_total;
3406 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3407 			trb_buff_len = TRB_MAX_BUFF_SIZE;
3408 	} while (num_trbs > 0);
3409 
3410 	check_trb_math(urb, num_trbs, running_total);
3411 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3412 			start_cycle, start_trb);
3413 	return 0;
3414 }
3415 
3416 /* Caller must have locked xhci->lock */
3417 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3418 		struct urb *urb, int slot_id, unsigned int ep_index)
3419 {
3420 	struct xhci_ring *ep_ring;
3421 	int num_trbs;
3422 	int ret;
3423 	struct usb_ctrlrequest *setup;
3424 	struct xhci_generic_trb *start_trb;
3425 	int start_cycle;
3426 	u32 field, length_field, remainder;
3427 	struct urb_priv *urb_priv;
3428 	struct xhci_td *td;
3429 
3430 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3431 	if (!ep_ring)
3432 		return -EINVAL;
3433 
3434 	/*
3435 	 * Need to copy setup packet into setup TRB, so we can't use the setup
3436 	 * DMA address.
3437 	 */
3438 	if (!urb->setup_packet)
3439 		return -EINVAL;
3440 
3441 	/* 1 TRB for setup, 1 for status */
3442 	num_trbs = 2;
3443 	/*
3444 	 * Don't need to check if we need additional event data and normal TRBs,
3445 	 * since data in control transfers will never get bigger than 16MB
3446 	 * XXX: can we get a buffer that crosses 64KB boundaries?
3447 	 */
3448 	if (urb->transfer_buffer_length > 0)
3449 		num_trbs++;
3450 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
3451 			ep_index, urb->stream_id,
3452 			num_trbs, urb, 0, mem_flags);
3453 	if (ret < 0)
3454 		return ret;
3455 
3456 	urb_priv = urb->hcpriv;
3457 	td = urb_priv->td[0];
3458 
3459 	/*
3460 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3461 	 * until we've finished creating all the other TRBs.  The ring's cycle
3462 	 * state may change as we enqueue the other TRBs, so save it too.
3463 	 */
3464 	start_trb = &ep_ring->enqueue->generic;
3465 	start_cycle = ep_ring->cycle_state;
3466 
3467 	/* Queue setup TRB - see section 6.4.1.2.1 */
3468 	/* FIXME better way to translate setup_packet into two u32 fields? */
3469 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
3470 	field = 0;
3471 	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3472 	if (start_cycle == 0)
3473 		field |= 0x1;
3474 
3475 	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3476 	if (xhci->hci_version >= 0x100) {
3477 		if (urb->transfer_buffer_length > 0) {
3478 			if (setup->bRequestType & USB_DIR_IN)
3479 				field |= TRB_TX_TYPE(TRB_DATA_IN);
3480 			else
3481 				field |= TRB_TX_TYPE(TRB_DATA_OUT);
3482 		}
3483 	}
3484 
3485 	queue_trb(xhci, ep_ring, true,
3486 		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3487 		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3488 		  TRB_LEN(8) | TRB_INTR_TARGET(0),
3489 		  /* Immediate data in pointer */
3490 		  field);
3491 
3492 	/* If there's data, queue data TRBs */
3493 	/* Only set interrupt on short packet for IN endpoints */
3494 	if (usb_urb_dir_in(urb))
3495 		field = TRB_ISP | TRB_TYPE(TRB_DATA);
3496 	else
3497 		field = TRB_TYPE(TRB_DATA);
3498 
3499 	remainder = xhci_td_remainder(xhci, 0,
3500 				   urb->transfer_buffer_length,
3501 				   urb->transfer_buffer_length,
3502 				   urb, 1);
3503 
3504 	length_field = TRB_LEN(urb->transfer_buffer_length) |
3505 		TRB_TD_SIZE(remainder) |
3506 		TRB_INTR_TARGET(0);
3507 
3508 	if (urb->transfer_buffer_length > 0) {
3509 		if (setup->bRequestType & USB_DIR_IN)
3510 			field |= TRB_DIR_IN;
3511 		queue_trb(xhci, ep_ring, true,
3512 				lower_32_bits(urb->transfer_dma),
3513 				upper_32_bits(urb->transfer_dma),
3514 				length_field,
3515 				field | ep_ring->cycle_state);
3516 	}
3517 
3518 	/* Save the DMA address of the last TRB in the TD */
3519 	td->last_trb = ep_ring->enqueue;
3520 
3521 	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3522 	/* If the device sent data, the status stage is an OUT transfer */
3523 	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3524 		field = 0;
3525 	else
3526 		field = TRB_DIR_IN;
3527 	queue_trb(xhci, ep_ring, false,
3528 			0,
3529 			0,
3530 			TRB_INTR_TARGET(0),
3531 			/* Event on completion */
3532 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3533 
3534 	giveback_first_trb(xhci, slot_id, ep_index, 0,
3535 			start_cycle, start_trb);
3536 	return 0;
3537 }
3538 
3539 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3540 		struct urb *urb, int i)
3541 {
3542 	int num_trbs = 0;
3543 	u64 addr, td_len;
3544 
3545 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3546 	td_len = urb->iso_frame_desc[i].length;
3547 
3548 	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3549 			TRB_MAX_BUFF_SIZE);
3550 	if (num_trbs == 0)
3551 		num_trbs++;
3552 
3553 	return num_trbs;
3554 }
3555 
3556 /*
3557  * The transfer burst count field of the isochronous TRB defines the number of
3558  * bursts that are required to move all packets in this TD.  Only SuperSpeed
3559  * devices can burst up to bMaxBurst number of packets per service interval.
3560  * This field is zero based, meaning a value of zero in the field means one
3561  * burst.  Basically, for everything but SuperSpeed devices, this field will be
3562  * zero.  Only xHCI 1.0 host controllers support this field.
3563  */
3564 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3565 		struct usb_device *udev,
3566 		struct urb *urb, unsigned int total_packet_count)
3567 {
3568 	unsigned int max_burst;
3569 
3570 	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3571 		return 0;
3572 
3573 	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3574 	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3575 }
3576 
3577 /*
3578  * Returns the number of packets in the last "burst" of packets.  This field is
3579  * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3580  * the last burst packet count is equal to the total number of packets in the
3581  * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3582  * must contain (bMaxBurst + 1) number of packets, but the last burst can
3583  * contain 1 to (bMaxBurst + 1) packets.
3584  */
3585 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3586 		struct usb_device *udev,
3587 		struct urb *urb, unsigned int total_packet_count)
3588 {
3589 	unsigned int max_burst;
3590 	unsigned int residue;
3591 
3592 	if (xhci->hci_version < 0x100)
3593 		return 0;
3594 
3595 	switch (udev->speed) {
3596 	case USB_SPEED_SUPER:
3597 		/* bMaxBurst is zero based: 0 means 1 packet per burst */
3598 		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3599 		residue = total_packet_count % (max_burst + 1);
3600 		/* If residue is zero, the last burst contains (max_burst + 1)
3601 		 * number of packets, but the TLBPC field is zero-based.
3602 		 */
3603 		if (residue == 0)
3604 			return max_burst;
3605 		return residue - 1;
3606 	default:
3607 		if (total_packet_count == 0)
3608 			return 0;
3609 		return total_packet_count - 1;
3610 	}
3611 }
3612 
3613 /*
3614  * Calculates Frame ID field of the isochronous TRB identifies the
3615  * target frame that the Interval associated with this Isochronous
3616  * Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec.
3617  *
3618  * Returns actual frame id on success, negative value on error.
3619  */
3620 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3621 		struct urb *urb, int index)
3622 {
3623 	int start_frame, ist, ret = 0;
3624 	int start_frame_id, end_frame_id, current_frame_id;
3625 
3626 	if (urb->dev->speed == USB_SPEED_LOW ||
3627 			urb->dev->speed == USB_SPEED_FULL)
3628 		start_frame = urb->start_frame + index * urb->interval;
3629 	else
3630 		start_frame = (urb->start_frame + index * urb->interval) >> 3;
3631 
3632 	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3633 	 *
3634 	 * If bit [3] of IST is cleared to '0', software can add a TRB no
3635 	 * later than IST[2:0] Microframes before that TRB is scheduled to
3636 	 * be executed.
3637 	 * If bit [3] of IST is set to '1', software can add a TRB no later
3638 	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3639 	 */
3640 	ist = HCS_IST(xhci->hcs_params2) & 0x7;
3641 	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3642 		ist <<= 3;
3643 
3644 	/* Software shall not schedule an Isoch TD with a Frame ID value that
3645 	 * is less than the Start Frame ID or greater than the End Frame ID,
3646 	 * where:
3647 	 *
3648 	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3649 	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3650 	 *
3651 	 * Both the End Frame ID and Start Frame ID values are calculated
3652 	 * in microframes. When software determines the valid Frame ID value;
3653 	 * The End Frame ID value should be rounded down to the nearest Frame
3654 	 * boundary, and the Start Frame ID value should be rounded up to the
3655 	 * nearest Frame boundary.
3656 	 */
3657 	current_frame_id = readl(&xhci->run_regs->microframe_index);
3658 	start_frame_id = roundup(current_frame_id + ist + 1, 8);
3659 	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3660 
3661 	start_frame &= 0x7ff;
3662 	start_frame_id = (start_frame_id >> 3) & 0x7ff;
3663 	end_frame_id = (end_frame_id >> 3) & 0x7ff;
3664 
3665 	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3666 		 __func__, index, readl(&xhci->run_regs->microframe_index),
3667 		 start_frame_id, end_frame_id, start_frame);
3668 
3669 	if (start_frame_id < end_frame_id) {
3670 		if (start_frame > end_frame_id ||
3671 				start_frame < start_frame_id)
3672 			ret = -EINVAL;
3673 	} else if (start_frame_id > end_frame_id) {
3674 		if ((start_frame > end_frame_id &&
3675 				start_frame < start_frame_id))
3676 			ret = -EINVAL;
3677 	} else {
3678 			ret = -EINVAL;
3679 	}
3680 
3681 	if (index == 0) {
3682 		if (ret == -EINVAL || start_frame == start_frame_id) {
3683 			start_frame = start_frame_id + 1;
3684 			if (urb->dev->speed == USB_SPEED_LOW ||
3685 					urb->dev->speed == USB_SPEED_FULL)
3686 				urb->start_frame = start_frame;
3687 			else
3688 				urb->start_frame = start_frame << 3;
3689 			ret = 0;
3690 		}
3691 	}
3692 
3693 	if (ret) {
3694 		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3695 				start_frame, current_frame_id, index,
3696 				start_frame_id, end_frame_id);
3697 		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3698 		return ret;
3699 	}
3700 
3701 	return start_frame;
3702 }
3703 
3704 /* This is for isoc transfer */
3705 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3706 		struct urb *urb, int slot_id, unsigned int ep_index)
3707 {
3708 	struct xhci_ring *ep_ring;
3709 	struct urb_priv *urb_priv;
3710 	struct xhci_td *td;
3711 	int num_tds, trbs_per_td;
3712 	struct xhci_generic_trb *start_trb;
3713 	bool first_trb;
3714 	int start_cycle;
3715 	u32 field, length_field;
3716 	int running_total, trb_buff_len, td_len, td_remain_len, ret;
3717 	u64 start_addr, addr;
3718 	int i, j;
3719 	bool more_trbs_coming;
3720 	struct xhci_virt_ep *xep;
3721 
3722 	xep = &xhci->devs[slot_id]->eps[ep_index];
3723 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3724 
3725 	num_tds = urb->number_of_packets;
3726 	if (num_tds < 1) {
3727 		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3728 		return -EINVAL;
3729 	}
3730 
3731 	start_addr = (u64) urb->transfer_dma;
3732 	start_trb = &ep_ring->enqueue->generic;
3733 	start_cycle = ep_ring->cycle_state;
3734 
3735 	urb_priv = urb->hcpriv;
3736 	/* Queue the first TRB, even if it's zero-length */
3737 	for (i = 0; i < num_tds; i++) {
3738 		unsigned int total_packet_count;
3739 		unsigned int burst_count;
3740 		unsigned int residue;
3741 
3742 		first_trb = true;
3743 		running_total = 0;
3744 		addr = start_addr + urb->iso_frame_desc[i].offset;
3745 		td_len = urb->iso_frame_desc[i].length;
3746 		td_remain_len = td_len;
3747 		total_packet_count = DIV_ROUND_UP(td_len,
3748 				GET_MAX_PACKET(
3749 					usb_endpoint_maxp(&urb->ep->desc)));
3750 		/* A zero-length transfer still involves at least one packet. */
3751 		if (total_packet_count == 0)
3752 			total_packet_count++;
3753 		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3754 				total_packet_count);
3755 		residue = xhci_get_last_burst_packet_count(xhci,
3756 				urb->dev, urb, total_packet_count);
3757 
3758 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3759 
3760 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3761 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
3762 		if (ret < 0) {
3763 			if (i == 0)
3764 				return ret;
3765 			goto cleanup;
3766 		}
3767 
3768 		td = urb_priv->td[i];
3769 		for (j = 0; j < trbs_per_td; j++) {
3770 			int frame_id = 0;
3771 			u32 remainder = 0;
3772 			field = 0;
3773 
3774 			if (first_trb) {
3775 				field = TRB_TBC(burst_count) |
3776 					TRB_TLBPC(residue);
3777 				/* Queue the isoc TRB */
3778 				field |= TRB_TYPE(TRB_ISOC);
3779 
3780 				/* Calculate Frame ID and SIA fields */
3781 				if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3782 						HCC_CFC(xhci->hcc_params)) {
3783 					frame_id = xhci_get_isoc_frame_id(xhci,
3784 									  urb,
3785 									  i);
3786 					if (frame_id >= 0)
3787 						field |= TRB_FRAME_ID(frame_id);
3788 					else
3789 						field |= TRB_SIA;
3790 				} else
3791 					field |= TRB_SIA;
3792 
3793 				if (i == 0) {
3794 					if (start_cycle == 0)
3795 						field |= 0x1;
3796 				} else
3797 					field |= ep_ring->cycle_state;
3798 				first_trb = false;
3799 			} else {
3800 				/* Queue other normal TRBs */
3801 				field |= TRB_TYPE(TRB_NORMAL);
3802 				field |= ep_ring->cycle_state;
3803 			}
3804 
3805 			/* Only set interrupt on short packet for IN EPs */
3806 			if (usb_urb_dir_in(urb))
3807 				field |= TRB_ISP;
3808 
3809 			/* Chain all the TRBs together; clear the chain bit in
3810 			 * the last TRB to indicate it's the last TRB in the
3811 			 * chain.
3812 			 */
3813 			if (j < trbs_per_td - 1) {
3814 				field |= TRB_CHAIN;
3815 				more_trbs_coming = true;
3816 			} else {
3817 				td->last_trb = ep_ring->enqueue;
3818 				field |= TRB_IOC;
3819 				if (xhci->hci_version == 0x100 &&
3820 						!(xhci->quirks &
3821 							XHCI_AVOID_BEI)) {
3822 					/* Set BEI bit except for the last td */
3823 					if (i < num_tds - 1)
3824 						field |= TRB_BEI;
3825 				}
3826 				more_trbs_coming = false;
3827 			}
3828 
3829 			/* Calculate TRB length */
3830 			trb_buff_len = TRB_MAX_BUFF_SIZE -
3831 				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3832 			if (trb_buff_len > td_remain_len)
3833 				trb_buff_len = td_remain_len;
3834 
3835 			/* Set the TRB length, TD size, & interrupter fields. */
3836 			remainder = xhci_td_remainder(xhci, running_total,
3837 						   trb_buff_len, td_len,
3838 						   urb, trbs_per_td - j - 1);
3839 
3840 			length_field = TRB_LEN(trb_buff_len) |
3841 				TRB_TD_SIZE(remainder) |
3842 				TRB_INTR_TARGET(0);
3843 
3844 			queue_trb(xhci, ep_ring, more_trbs_coming,
3845 				lower_32_bits(addr),
3846 				upper_32_bits(addr),
3847 				length_field,
3848 				field);
3849 			running_total += trb_buff_len;
3850 
3851 			addr += trb_buff_len;
3852 			td_remain_len -= trb_buff_len;
3853 		}
3854 
3855 		/* Check TD length */
3856 		if (running_total != td_len) {
3857 			xhci_err(xhci, "ISOC TD length unmatch\n");
3858 			ret = -EINVAL;
3859 			goto cleanup;
3860 		}
3861 	}
3862 
3863 	/* store the next frame id */
3864 	if (HCC_CFC(xhci->hcc_params))
3865 		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
3866 
3867 	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3868 		if (xhci->quirks & XHCI_AMD_PLL_FIX)
3869 			usb_amd_quirk_pll_disable();
3870 	}
3871 	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3872 
3873 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3874 			start_cycle, start_trb);
3875 	return 0;
3876 cleanup:
3877 	/* Clean up a partially enqueued isoc transfer. */
3878 
3879 	for (i--; i >= 0; i--)
3880 		list_del_init(&urb_priv->td[i]->td_list);
3881 
3882 	/* Use the first TD as a temporary variable to turn the TDs we've queued
3883 	 * into No-ops with a software-owned cycle bit. That way the hardware
3884 	 * won't accidentally start executing bogus TDs when we partially
3885 	 * overwrite them.  td->first_trb and td->start_seg are already set.
3886 	 */
3887 	urb_priv->td[0]->last_trb = ep_ring->enqueue;
3888 	/* Every TRB except the first & last will have its cycle bit flipped. */
3889 	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3890 
3891 	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
3892 	ep_ring->enqueue = urb_priv->td[0]->first_trb;
3893 	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3894 	ep_ring->cycle_state = start_cycle;
3895 	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3896 	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3897 	return ret;
3898 }
3899 
3900 /*
3901  * Check transfer ring to guarantee there is enough room for the urb.
3902  * Update ISO URB start_frame and interval.
3903  * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
3904  * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
3905  * Contiguous Frame ID is not supported by HC.
3906  */
3907 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3908 		struct urb *urb, int slot_id, unsigned int ep_index)
3909 {
3910 	struct xhci_virt_device *xdev;
3911 	struct xhci_ring *ep_ring;
3912 	struct xhci_ep_ctx *ep_ctx;
3913 	int start_frame;
3914 	int xhci_interval;
3915 	int ep_interval;
3916 	int num_tds, num_trbs, i;
3917 	int ret;
3918 	struct xhci_virt_ep *xep;
3919 	int ist;
3920 
3921 	xdev = xhci->devs[slot_id];
3922 	xep = &xhci->devs[slot_id]->eps[ep_index];
3923 	ep_ring = xdev->eps[ep_index].ring;
3924 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3925 
3926 	num_trbs = 0;
3927 	num_tds = urb->number_of_packets;
3928 	for (i = 0; i < num_tds; i++)
3929 		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3930 
3931 	/* Check the ring to guarantee there is enough room for the whole urb.
3932 	 * Do not insert any td of the urb to the ring if the check failed.
3933 	 */
3934 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3935 			   num_trbs, mem_flags);
3936 	if (ret)
3937 		return ret;
3938 
3939 	/*
3940 	 * Check interval value. This should be done before we start to
3941 	 * calculate the start frame value.
3942 	 */
3943 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3944 	ep_interval = urb->interval;
3945 	/* Convert to microframes */
3946 	if (urb->dev->speed == USB_SPEED_LOW ||
3947 			urb->dev->speed == USB_SPEED_FULL)
3948 		ep_interval *= 8;
3949 	/* FIXME change this to a warning and a suggestion to use the new API
3950 	 * to set the polling interval (once the API is added).
3951 	 */
3952 	if (xhci_interval != ep_interval) {
3953 		dev_dbg_ratelimited(&urb->dev->dev,
3954 				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3955 				ep_interval, ep_interval == 1 ? "" : "s",
3956 				xhci_interval, xhci_interval == 1 ? "" : "s");
3957 		urb->interval = xhci_interval;
3958 		/* Convert back to frames for LS/FS devices */
3959 		if (urb->dev->speed == USB_SPEED_LOW ||
3960 				urb->dev->speed == USB_SPEED_FULL)
3961 			urb->interval /= 8;
3962 	}
3963 
3964 	/* Calculate the start frame and put it in urb->start_frame. */
3965 	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3966 		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3967 				EP_STATE_RUNNING) {
3968 			urb->start_frame = xep->next_frame_id;
3969 			goto skip_start_over;
3970 		}
3971 	}
3972 
3973 	start_frame = readl(&xhci->run_regs->microframe_index);
3974 	start_frame &= 0x3fff;
3975 	/*
3976 	 * Round up to the next frame and consider the time before trb really
3977 	 * gets scheduled by hardare.
3978 	 */
3979 	ist = HCS_IST(xhci->hcs_params2) & 0x7;
3980 	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3981 		ist <<= 3;
3982 	start_frame += ist + XHCI_CFC_DELAY;
3983 	start_frame = roundup(start_frame, 8);
3984 
3985 	/*
3986 	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
3987 	 * is greate than 8 microframes.
3988 	 */
3989 	if (urb->dev->speed == USB_SPEED_LOW ||
3990 			urb->dev->speed == USB_SPEED_FULL) {
3991 		start_frame = roundup(start_frame, urb->interval << 3);
3992 		urb->start_frame = start_frame >> 3;
3993 	} else {
3994 		start_frame = roundup(start_frame, urb->interval);
3995 		urb->start_frame = start_frame;
3996 	}
3997 
3998 skip_start_over:
3999 	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
4000 
4001 	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
4002 }
4003 
4004 /****		Command Ring Operations		****/
4005 
4006 /* Generic function for queueing a command TRB on the command ring.
4007  * Check to make sure there's room on the command ring for one command TRB.
4008  * Also check that there's room reserved for commands that must not fail.
4009  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
4010  * then only check for the number of reserved spots.
4011  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4012  * because the command event handler may want to resubmit a failed command.
4013  */
4014 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4015 			 u32 field1, u32 field2,
4016 			 u32 field3, u32 field4, bool command_must_succeed)
4017 {
4018 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4019 	int ret;
4020 
4021 	if (xhci->xhc_state) {
4022 		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4023 		return -ESHUTDOWN;
4024 	}
4025 
4026 	if (!command_must_succeed)
4027 		reserved_trbs++;
4028 
4029 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4030 			reserved_trbs, GFP_ATOMIC);
4031 	if (ret < 0) {
4032 		xhci_err(xhci, "ERR: No room for command on command ring\n");
4033 		if (command_must_succeed)
4034 			xhci_err(xhci, "ERR: Reserved TRB counting for "
4035 					"unfailable commands failed.\n");
4036 		return ret;
4037 	}
4038 
4039 	cmd->command_trb = xhci->cmd_ring->enqueue;
4040 	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4041 
4042 	/* if there are no other commands queued we start the timeout timer */
4043 	if (xhci->cmd_list.next == &cmd->cmd_list &&
4044 	    !timer_pending(&xhci->cmd_timer)) {
4045 		xhci->current_cmd = cmd;
4046 		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
4047 	}
4048 
4049 	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4050 			field4 | xhci->cmd_ring->cycle_state);
4051 	return 0;
4052 }
4053 
4054 /* Queue a slot enable or disable request on the command ring */
4055 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4056 		u32 trb_type, u32 slot_id)
4057 {
4058 	return queue_command(xhci, cmd, 0, 0, 0,
4059 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4060 }
4061 
4062 /* Queue an address device command TRB */
4063 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4064 		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4065 {
4066 	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4067 			upper_32_bits(in_ctx_ptr), 0,
4068 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4069 			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4070 }
4071 
4072 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4073 		u32 field1, u32 field2, u32 field3, u32 field4)
4074 {
4075 	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4076 }
4077 
4078 /* Queue a reset device command TRB */
4079 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4080 		u32 slot_id)
4081 {
4082 	return queue_command(xhci, cmd, 0, 0, 0,
4083 			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4084 			false);
4085 }
4086 
4087 /* Queue a configure endpoint command TRB */
4088 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4089 		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4090 		u32 slot_id, bool command_must_succeed)
4091 {
4092 	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4093 			upper_32_bits(in_ctx_ptr), 0,
4094 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4095 			command_must_succeed);
4096 }
4097 
4098 /* Queue an evaluate context command TRB */
4099 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4100 		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4101 {
4102 	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4103 			upper_32_bits(in_ctx_ptr), 0,
4104 			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4105 			command_must_succeed);
4106 }
4107 
4108 /*
4109  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4110  * activity on an endpoint that is about to be suspended.
4111  */
4112 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4113 			     int slot_id, unsigned int ep_index, int suspend)
4114 {
4115 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4116 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4117 	u32 type = TRB_TYPE(TRB_STOP_RING);
4118 	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4119 
4120 	return queue_command(xhci, cmd, 0, 0, 0,
4121 			trb_slot_id | trb_ep_index | type | trb_suspend, false);
4122 }
4123 
4124 /* Set Transfer Ring Dequeue Pointer command */
4125 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
4126 		unsigned int slot_id, unsigned int ep_index,
4127 		unsigned int stream_id,
4128 		struct xhci_dequeue_state *deq_state)
4129 {
4130 	dma_addr_t addr;
4131 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4132 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4133 	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
4134 	u32 trb_sct = 0;
4135 	u32 type = TRB_TYPE(TRB_SET_DEQ);
4136 	struct xhci_virt_ep *ep;
4137 	struct xhci_command *cmd;
4138 	int ret;
4139 
4140 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4141 		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
4142 		deq_state->new_deq_seg,
4143 		(unsigned long long)deq_state->new_deq_seg->dma,
4144 		deq_state->new_deq_ptr,
4145 		(unsigned long long)xhci_trb_virt_to_dma(
4146 			deq_state->new_deq_seg, deq_state->new_deq_ptr),
4147 		deq_state->new_cycle_state);
4148 
4149 	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
4150 				    deq_state->new_deq_ptr);
4151 	if (addr == 0) {
4152 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4153 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
4154 			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
4155 		return;
4156 	}
4157 	ep = &xhci->devs[slot_id]->eps[ep_index];
4158 	if ((ep->ep_state & SET_DEQ_PENDING)) {
4159 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4160 		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
4161 		return;
4162 	}
4163 
4164 	/* This function gets called from contexts where it cannot sleep */
4165 	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
4166 	if (!cmd) {
4167 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
4168 		return;
4169 	}
4170 
4171 	ep->queued_deq_seg = deq_state->new_deq_seg;
4172 	ep->queued_deq_ptr = deq_state->new_deq_ptr;
4173 	if (stream_id)
4174 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
4175 	ret = queue_command(xhci, cmd,
4176 		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
4177 		upper_32_bits(addr), trb_stream_id,
4178 		trb_slot_id | trb_ep_index | type, false);
4179 	if (ret < 0) {
4180 		xhci_free_command(xhci, cmd);
4181 		return;
4182 	}
4183 
4184 	/* Stop the TD queueing code from ringing the doorbell until
4185 	 * this command completes.  The HC won't set the dequeue pointer
4186 	 * if the ring is running, and ringing the doorbell starts the
4187 	 * ring running.
4188 	 */
4189 	ep->ep_state |= SET_DEQ_PENDING;
4190 }
4191 
4192 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4193 			int slot_id, unsigned int ep_index)
4194 {
4195 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4196 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4197 	u32 type = TRB_TYPE(TRB_RESET_EP);
4198 
4199 	return queue_command(xhci, cmd, 0, 0, 0,
4200 			trb_slot_id | trb_ep_index | type, false);
4201 }
4202