xref: /openbmc/linux/drivers/usb/host/ehci-q.c (revision 5bd8e16d)
1 /*
2  * Copyright (C) 2001-2004 by David Brownell
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License as published by the
6  * Free Software Foundation; either version 2 of the License, or (at your
7  * option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18 
19 /* this file is part of ehci-hcd.c */
20 
21 /*-------------------------------------------------------------------------*/
22 
23 /*
24  * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
25  *
26  * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
27  * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
28  * buffers needed for the larger number).  We use one QH per endpoint, queue
29  * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
30  *
31  * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
32  * interrupts) needs careful scheduling.  Performance improvements can be
33  * an ongoing challenge.  That's in "ehci-sched.c".
34  *
35  * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
36  * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
37  * (b) special fields in qh entries or (c) split iso entries.  TTs will
38  * buffer low/full speed data so the host collects it at high speed.
39  */
40 
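/*
 * Rough picture (illustrative only) of how the structures handled below
 * relate to each other:
 *
 *   ehci->async (head QH) -> QH (endpoint A) -> QH (endpoint B) -> ...
 *   each QH:  hardware overlay + software qtd_list
 *   qtd_list: qtd -> qtd -> ... -> inactive "dummy" qtd
 *
 * The hardware walks the QH ring (circular via hw_next); each qtd describes
 * up to five 4K buffer pages of one URB's data, and every queue ends in a
 * dummy qtd so new work can be appended without racing the HC.
 */
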
41 /*-------------------------------------------------------------------------*/
42 
43 /* fill a qtd, returning how much of the buffer we were able to queue up */
44 
45 static int
46 qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
47 		  size_t len, int token, int maxpacket)
48 {
49 	int	i, count;
50 	u64	addr = buf;
51 
52 	/* one buffer entry per 4K ... first might be short or unaligned */
53 	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
54 	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
55 	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
56 	if (likely (len < count))		/* ... iff needed */
57 		count = len;
58 	else {
59 		buf +=  0x1000;
60 		buf &= ~0x0fff;
61 
62 		/* per-qtd limit: from 16K to 20K (best alignment) */
63 		for (i = 1; count < len && i < 5; i++) {
64 			addr = buf;
65 			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
66 			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
67 					(u32)(addr >> 32));
68 			buf += 0x1000;
69 			if ((count + 0x1000) < len)
70 				count += 0x1000;
71 			else
72 				count = len;
73 		}
74 
75 		/* short packets may only terminate transfers */
76 		if (count != len)
77 			count -= (count % maxpacket);
78 	}
79 	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
80 	qtd->length = count;
81 
82 	return count;
83 }
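
/*
 * Worked example (illustrative values only): qtd_fill() with buf = 0x12345e00,
 * len = 20480 and maxpacket = 512 sets
 *   hw_buf[0] = 0x12345e00, covering 0x1000 - 0xe00 = 512 bytes of that page
 *   hw_buf[1..4] = 0x12346000 .. 0x12349000, 4096 bytes each
 * so count = 512 + 4 * 4096 = 16896, already a maxpacket multiple; the
 * remaining 3584 bytes must go into a following qtd.  A page-aligned buffer
 * would instead fit the full 5 * 4096 = 20480 bytes, matching the
 * "16K to 20K" limit noted above.
 */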
84 
85 /*-------------------------------------------------------------------------*/
86 
87 static inline void
88 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
89 {
90 	struct ehci_qh_hw *hw = qh->hw;
91 
92 	/* writes to an active overlay are unsafe */
93 	WARN_ON(qh->qh_state != QH_STATE_IDLE);
94 
95 	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
96 	hw->hw_alt_next = EHCI_LIST_END(ehci);
97 
98 	/* Except for control endpoints, we make hardware maintain data
99 	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
100 	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
101 	 * ever clear it.
102 	 */
103 	if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
104 		unsigned	is_out, epnum;
105 
106 		is_out = qh->is_out;
107 		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
108 		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
109 			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
110 			usb_settoggle (qh->dev, epnum, is_out, 1);
111 		}
112 	}
113 
114 	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
115 }
116 
117 /* if it weren't for a common silicon quirk (writing the dummy into the qh
118  * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
119  * recovery (including urb dequeue) would need software changes to a QH...
120  */
121 static void
122 qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
123 {
124 	struct ehci_qtd *qtd;
125 
126 	qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
127 
128 	/*
129 	 * first qtd may already be partially processed.
130 	 * If we come here during unlink, the QH overlay region
131 	 * might have reference to the just unlinked qtd. The
132 	 * qtd is updated in qh_completions(). Update the QH
133 	 * overlay here.
134 	 */
135 	if (qh->hw->hw_token & ACTIVE_BIT(ehci))
136 		qh->hw->hw_qtd_next = qtd->hw_next;
137 	else
138 		qh_update(ehci, qh, qtd);
139 }
140 
141 /*-------------------------------------------------------------------------*/
142 
143 static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
144 
145 static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
146 		struct usb_host_endpoint *ep)
147 {
148 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
149 	struct ehci_qh		*qh = ep->hcpriv;
150 	unsigned long		flags;
151 
152 	spin_lock_irqsave(&ehci->lock, flags);
153 	qh->clearing_tt = 0;
154 	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
155 			&& ehci->rh_state == EHCI_RH_RUNNING)
156 		qh_link_async(ehci, qh);
157 	spin_unlock_irqrestore(&ehci->lock, flags);
158 }
159 
160 static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
161 		struct urb *urb, u32 token)
162 {
163 
164 	/* If an async split transaction gets an error or is unlinked,
165 	 * the TT buffer may be left in an indeterminate state.  We
166 	 * have to clear the TT buffer.
167 	 *
168 	 * Note: this routine is never called for Isochronous transfers.
169 	 */
170 	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
171 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
172 		struct usb_device *tt = urb->dev->tt->hub;
173 		dev_dbg(&tt->dev,
174 			"clear tt buffer port %d, a%d ep%d t%08x\n",
175 			urb->dev->ttport, urb->dev->devnum,
176 			usb_pipeendpoint(urb->pipe), token);
177 #endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */
178 		if (!ehci_is_TDI(ehci)
179 				|| urb->dev->tt->hub !=
180 				   ehci_to_hcd(ehci)->self.root_hub) {
181 			if (usb_hub_clear_tt_buffer(urb) == 0)
182 				qh->clearing_tt = 1;
183 		} else {
184 
185 			/* REVISIT ARC-derived cores don't clear the root
186 			 * hub TT buffer in this way...
187 			 */
188 		}
189 	}
190 }
191 
192 static int qtd_copy_status (
193 	struct ehci_hcd *ehci,
194 	struct urb *urb,
195 	size_t length,
196 	u32 token
197 )
198 {
199 	int	status = -EINPROGRESS;
200 
201 	/* count IN/OUT bytes, not SETUP (even short packets) */
202 	if (likely (QTD_PID (token) != 2))
203 		urb->actual_length += length - QTD_LENGTH (token);
204 
205 	/* don't modify error codes */
206 	if (unlikely(urb->unlinked))
207 		return status;
208 
209 	/* force cleanup after short read; not always an error */
210 	if (unlikely (IS_SHORT_READ (token)))
211 		status = -EREMOTEIO;
212 
213 	/* serious "can't proceed" faults reported by the hardware */
214 	if (token & QTD_STS_HALT) {
215 		if (token & QTD_STS_BABBLE) {
216 			/* FIXME "must" disable babbling device's port too */
217 			status = -EOVERFLOW;
218 		/* CERR nonzero + halt --> stall */
219 		} else if (QTD_CERR(token)) {
220 			status = -EPIPE;
221 
222 		/* In theory, more than one of the following bits can be set
223 		 * since they are sticky and the transaction is retried.
224 		 * Which to test first is rather arbitrary.
225 		 */
226 		} else if (token & QTD_STS_MMF) {
227 			/* fs/ls interrupt xfer missed the complete-split */
228 			status = -EPROTO;
229 		} else if (token & QTD_STS_DBE) {
230 			status = (QTD_PID (token) == 1) /* IN ? */
231 				? -ENOSR  /* hc couldn't read data */
232 				: -ECOMM; /* hc couldn't write data */
233 		} else if (token & QTD_STS_XACT) {
234 			/* timeout, bad CRC, wrong PID, etc */
235 			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
236 				urb->dev->devpath,
237 				usb_pipeendpoint(urb->pipe),
238 				usb_pipein(urb->pipe) ? "in" : "out");
239 			status = -EPROTO;
240 		} else {	/* unknown */
241 			status = -EPROTO;
242 		}
243 	}
244 
245 	return status;
246 }
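
/*
 * Summary of the mapping above, for quick reference (derived from the code,
 * not an additional specification):
 *   BABBLE                  -> -EOVERFLOW
 *   HALT with CERR != 0     -> -EPIPE (endpoint stall)
 *   MMF                     -> -EPROTO (missed complete-split)
 *   DBE                     -> -ENOSR for IN, -ECOMM for OUT
 *   XACT / unknown          -> -EPROTO
 *   short read              -> -EREMOTEIO (cleanup hint, not always an error)
 */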
247 
248 static void
249 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
250 __releases(ehci->lock)
251 __acquires(ehci->lock)
252 {
253 	if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
254 		/* ... update hc-wide periodic stats */
255 		ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
256 	}
257 
258 	if (unlikely(urb->unlinked)) {
259 		COUNT(ehci->stats.unlink);
260 	} else {
261 		/* report non-error and short read status as zero */
262 		if (status == -EINPROGRESS || status == -EREMOTEIO)
263 			status = 0;
264 		COUNT(ehci->stats.complete);
265 	}
266 
267 #ifdef EHCI_URB_TRACE
268 	ehci_dbg (ehci,
269 		"%s %s urb %p ep%d%s status %d len %d/%d\n",
270 		__func__, urb->dev->devpath, urb,
271 		usb_pipeendpoint (urb->pipe),
272 		usb_pipein (urb->pipe) ? "in" : "out",
273 		status,
274 		urb->actual_length, urb->transfer_buffer_length);
275 #endif
276 
277 	/* complete() can reenter this HCD */
278 	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
279 	spin_unlock (&ehci->lock);
280 	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
281 	spin_lock (&ehci->lock);
282 }
283 
284 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
285 
286 /*
287  * Process and free completed qtds for a qh, returning URBs to drivers.
288  * Chases up to qh->hw_current.  Returns nonzero if the caller should
289  * unlink qh.
290  */
291 static unsigned
292 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
293 {
294 	struct ehci_qtd		*last, *end = qh->dummy;
295 	struct list_head	*entry, *tmp;
296 	int			last_status;
297 	int			stopped;
298 	u8			state;
299 	struct ehci_qh_hw	*hw = qh->hw;
300 
301 	/* completions (or tasks on other cpus) must never clobber HALT
302 	 * till we've gone through and cleaned everything up, even when
303 	 * they add urbs to this qh's queue or mark them for unlinking.
304 	 *
305 	 * NOTE:  unlinking expects to be done in queue order.
306 	 *
307 	 * It's a bug for qh->qh_state to be anything other than
308 	 * QH_STATE_IDLE, unless our caller is scan_async() or
309 	 * scan_intr().
310 	 */
311 	state = qh->qh_state;
312 	qh->qh_state = QH_STATE_COMPLETING;
313 	stopped = (state == QH_STATE_IDLE);
314 
315  rescan:
316 	last = NULL;
317 	last_status = -EINPROGRESS;
318 	qh->dequeue_during_giveback = 0;
319 
320 	/* remove de-activated QTDs from front of queue.
321 	 * after faults (including short reads), cleanup this urb
322 	 * then let the queue advance.
323 	 * if queue is stopped, handles unlinks.
324 	 */
325 	list_for_each_safe (entry, tmp, &qh->qtd_list) {
326 		struct ehci_qtd	*qtd;
327 		struct urb	*urb;
328 		u32		token = 0;
329 
330 		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
331 		urb = qtd->urb;
332 
333 		/* clean up any state from previous QTD ...*/
334 		if (last) {
335 			if (likely (last->urb != urb)) {
336 				ehci_urb_done(ehci, last->urb, last_status);
337 				last_status = -EINPROGRESS;
338 			}
339 			ehci_qtd_free (ehci, last);
340 			last = NULL;
341 		}
342 
343 		/* ignore urbs submitted during completions we reported */
344 		if (qtd == end)
345 			break;
346 
347 		/* hardware copies qtd out of qh overlay */
348 		rmb ();
349 		token = hc32_to_cpu(ehci, qtd->hw_token);
350 
351 		/* always clean up qtds the hc de-activated */
352  retry_xacterr:
353 		if ((token & QTD_STS_ACTIVE) == 0) {
354 
355 			/* Report Data Buffer Error: non-fatal but useful */
356 			if (token & QTD_STS_DBE)
357 				ehci_dbg(ehci,
358 					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
359 					urb,
360 					usb_endpoint_num(&urb->ep->desc),
361 					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
362 					urb->transfer_buffer_length,
363 					qtd,
364 					qh);
365 
366 			/* on STALL, error, and short reads this urb must
367 			 * complete and all its qtds must be recycled.
368 			 */
369 			if ((token & QTD_STS_HALT) != 0) {
370 
371 				/* retry transaction errors until we
372 				 * reach the software xacterr limit
373 				 */
374 				if ((token & QTD_STS_XACT) &&
375 						QTD_CERR(token) == 0 &&
376 						++qh->xacterrs < QH_XACTERR_MAX &&
377 						!urb->unlinked) {
378 					ehci_dbg(ehci,
379 	"detected XactErr len %zu/%zu retry %d\n",
380 	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
381 
382 					/* reset the token in the qtd and the
383 					 * qh overlay (which still contains
384 					 * the qtd) so that we pick up from
385 					 * where we left off
386 					 */
387 					token &= ~QTD_STS_HALT;
388 					token |= QTD_STS_ACTIVE |
389 							(EHCI_TUNE_CERR << 10);
390 					qtd->hw_token = cpu_to_hc32(ehci,
391 							token);
392 					wmb();
393 					hw->hw_token = cpu_to_hc32(ehci,
394 							token);
395 					goto retry_xacterr;
396 				}
397 				stopped = 1;
398 
399 			/* magic dummy for some short reads; qh won't advance.
400 			 * that silicon quirk can kick in with this dummy too.
401 			 *
402 			 * other short reads won't stop the queue, including
403 			 * control transfers (status stage handles that) or
404 			 * most other single-qtd reads ... the queue stops if
405 			 * URB_SHORT_NOT_OK was set so the driver submitting
406 			 * the urbs could clean it up.
407 			 */
408 			} else if (IS_SHORT_READ (token)
409 					&& !(qtd->hw_alt_next
410 						& EHCI_LIST_END(ehci))) {
411 				stopped = 1;
412 			}
413 
414 		/* stop scanning when we reach qtds the hc is using */
415 		} else if (likely (!stopped
416 				&& ehci->rh_state >= EHCI_RH_RUNNING)) {
417 			break;
418 
419 		/* scan the whole queue for unlinks whenever it stops */
420 		} else {
421 			stopped = 1;
422 
423 			/* cancel everything if we halt, suspend, etc */
424 			if (ehci->rh_state < EHCI_RH_RUNNING)
425 				last_status = -ESHUTDOWN;
426 
427 			/* this qtd is active; skip it unless a previous qtd
428 			 * for its urb faulted, or its urb was canceled.
429 			 */
430 			else if (last_status == -EINPROGRESS && !urb->unlinked)
431 				continue;
432 
433 			/*
434 			 * If this was the active qtd when the qh was unlinked
435 			 * and the overlay's token is active, then the overlay
436 			 * hasn't been written back to the qtd yet so use its
437 			 * token instead of the qtd's.  After the qtd is
438 			 * processed and removed, the overlay won't be valid
439 			 * any more.
440 			 */
441 			if (state == QH_STATE_IDLE &&
442 					qh->qtd_list.next == &qtd->qtd_list &&
443 					(hw->hw_token & ACTIVE_BIT(ehci))) {
444 				token = hc32_to_cpu(ehci, hw->hw_token);
445 				hw->hw_token &= ~ACTIVE_BIT(ehci);
446 
447 				/* An unlink may leave an incomplete
448 				 * async transaction in the TT buffer.
449 				 * We have to clear it.
450 				 */
451 				ehci_clear_tt_buffer(ehci, qh, urb, token);
452 			}
453 		}
454 
455 		/* unless we already know the urb's status, collect qtd status
456 		 * and update count of bytes transferred.  in common short read
457 		 * cases with only one data qtd (including control transfers),
458 		 * queue processing won't halt.  but with two or more qtds (for
459 		 * example, with a 32 KB transfer), when the first qtd gets a
460 		 * short read the second must be removed by hand.
461 		 */
462 		if (last_status == -EINPROGRESS) {
463 			last_status = qtd_copy_status(ehci, urb,
464 					qtd->length, token);
465 			if (last_status == -EREMOTEIO
466 					&& (qtd->hw_alt_next
467 						& EHCI_LIST_END(ehci)))
468 				last_status = -EINPROGRESS;
469 
470 			/* As part of low/full-speed endpoint-halt processing
471 			 * we must clear the TT buffer (11.17.5).
472 			 */
473 			if (unlikely(last_status != -EINPROGRESS &&
474 					last_status != -EREMOTEIO)) {
475 				/* The TT's in some hubs malfunction when they
476 				 * receive this request following a STALL (they
477 				 * stop sending isochronous packets).  Since a
478 				 * STALL can't leave the TT buffer in a busy
479 				 * state (if you believe Figures 11-48 - 11-51
480 				 * in the USB 2.0 spec), we won't clear the TT
481 				 * buffer in this case.  Strictly speaking this
482 				 * is a violation of the spec.
483 				 */
484 				if (last_status != -EPIPE)
485 					ehci_clear_tt_buffer(ehci, qh, urb,
486 							token);
487 			}
488 		}
489 
490 		/* if we're removing something not at the queue head,
491 		 * patch the hardware queue pointer.
492 		 */
493 		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
494 			last = list_entry (qtd->qtd_list.prev,
495 					struct ehci_qtd, qtd_list);
496 			last->hw_next = qtd->hw_next;
497 		}
498 
499 		/* remove qtd; it's recycled after possible urb completion */
500 		list_del (&qtd->qtd_list);
501 		last = qtd;
502 
503 		/* reinit the xacterr counter for the next qtd */
504 		qh->xacterrs = 0;
505 	}
506 
507 	/* last urb's completion might still need calling */
508 	if (likely (last != NULL)) {
509 		ehci_urb_done(ehci, last->urb, last_status);
510 		ehci_qtd_free (ehci, last);
511 	}
512 
513 	/* Do we need to rescan for URBs dequeued during a giveback? */
514 	if (unlikely(qh->dequeue_during_giveback)) {
515 		/* If the QH is already unlinked, do the rescan now. */
516 		if (state == QH_STATE_IDLE)
517 			goto rescan;
518 
519 		/* Otherwise the caller must unlink the QH. */
520 	}
521 
522 	/* restore original state; caller must unlink or relink */
523 	qh->qh_state = state;
524 
525 	/* be sure the hardware's done with the qh before refreshing
526 	 * it after fault cleanup, or recovering from silicon wrongly
527 	 * overlaying the dummy qtd (which reduces DMA chatter).
528 	 *
529 	 * We won't refresh a QH that's linked (after the HC
530 	 * stopped the queue).  That avoids a race:
531 	 *  - HC reads first part of QH;
532 	 *  - CPU updates that first part and the token;
533 	 *  - HC reads rest of that QH, including token
534 	 * Result:  HC gets an inconsistent image, and then
535 	 * DMAs to/from the wrong memory (corrupting it).
536 	 *
537 	 * That should be rare for interrupt transfers,
538 	 * except maybe high bandwidth ...
539 	 */
540 	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
541 		qh->exception = 1;
542 
543 	/* Let the caller know if the QH needs to be unlinked. */
544 	return qh->exception;
545 }
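
/*
 * Note (informal summary of the code above): the nonzero return value is
 * just qh->exception, which gets set once the queue has stopped (after a
 * fault, an unlink, a short read with URB_SHORT_NOT_OK, or simply running
 * out of qtds).  Callers such as scan_async() respond by unlinking the QH.
 */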
546 
547 /*-------------------------------------------------------------------------*/
548 
549 // high bandwidth multiplier, as encoded in highspeed endpoint descriptors
550 #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
551 // ... and packet size, for any kind of endpoint descriptor
552 #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
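
/*
 * Example (illustrative): a high bandwidth interrupt endpoint reporting
 * wMaxPacketSize = 0x1400 decodes as max_packet() = 0x400 = 1024 bytes and
 * hb_mult() = 1 + ((0x1400 >> 11) & 0x03) = 3 transactions per microframe,
 * i.e. up to 3072 bytes per uframe.
 */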
553 
554 /*
555  * reverse of qh_urb_transaction:  free a list of TDs.
556  * used for cleanup after errors, before HC sees an URB's TDs.
557  */
558 static void qtd_list_free (
559 	struct ehci_hcd		*ehci,
560 	struct urb		*urb,
561 	struct list_head	*qtd_list
562 ) {
563 	struct list_head	*entry, *temp;
564 
565 	list_for_each_safe (entry, temp, qtd_list) {
566 		struct ehci_qtd	*qtd;
567 
568 		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
569 		list_del (&qtd->qtd_list);
570 		ehci_qtd_free (ehci, qtd);
571 	}
572 }
573 
574 /*
575  * create a list of filled qtds for this URB; won't link into qh.
576  */
577 static struct list_head *
578 qh_urb_transaction (
579 	struct ehci_hcd		*ehci,
580 	struct urb		*urb,
581 	struct list_head	*head,
582 	gfp_t			flags
583 ) {
584 	struct ehci_qtd		*qtd, *qtd_prev;
585 	dma_addr_t		buf;
586 	int			len, this_sg_len, maxpacket;
587 	int			is_input;
588 	u32			token;
589 	int			i;
590 	struct scatterlist	*sg;
591 
592 	/*
593 	 * URBs map to sequences of QTDs:  one logical transaction
594 	 */
595 	qtd = ehci_qtd_alloc (ehci, flags);
596 	if (unlikely (!qtd))
597 		return NULL;
598 	list_add_tail (&qtd->qtd_list, head);
599 	qtd->urb = urb;
600 
601 	token = QTD_STS_ACTIVE;
602 	token |= (EHCI_TUNE_CERR << 10);
603 	/* for split transactions, SplitXState initialized to zero */
604 
605 	len = urb->transfer_buffer_length;
606 	is_input = usb_pipein (urb->pipe);
607 	if (usb_pipecontrol (urb->pipe)) {
608 		/* SETUP pid */
609 		qtd_fill(ehci, qtd, urb->setup_dma,
610 				sizeof (struct usb_ctrlrequest),
611 				token | (2 /* "setup" */ << 8), 8);
612 
613 		/* ... and always at least one more pid */
614 		token ^= QTD_TOGGLE;
615 		qtd_prev = qtd;
616 		qtd = ehci_qtd_alloc (ehci, flags);
617 		if (unlikely (!qtd))
618 			goto cleanup;
619 		qtd->urb = urb;
620 		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
621 		list_add_tail (&qtd->qtd_list, head);
622 
623 		/* for zero length DATA stages, STATUS is always IN */
624 		if (len == 0)
625 			token |= (1 /* "in" */ << 8);
626 	}
627 
628 	/*
629 	 * data transfer stage:  buffer setup
630 	 */
631 	i = urb->num_mapped_sgs;
632 	if (len > 0 && i > 0) {
633 		sg = urb->sg;
634 		buf = sg_dma_address(sg);
635 
636 		/* urb->transfer_buffer_length may be smaller than the
637 		 * size of the scatterlist (or vice versa)
638 		 */
639 		this_sg_len = min_t(int, sg_dma_len(sg), len);
640 	} else {
641 		sg = NULL;
642 		buf = urb->transfer_dma;
643 		this_sg_len = len;
644 	}
645 
646 	if (is_input)
647 		token |= (1 /* "in" */ << 8);
648 	/* else it's already initted to "out" pid (0 << 8) */
649 
650 	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
651 
652 	/*
653 	 * buffer gets wrapped in one or more qtds;
654 	 * last one may be "short" (including zero len)
655 	 * and may serve as a control status ack
656 	 */
657 	for (;;) {
658 		int this_qtd_len;
659 
660 		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
661 				maxpacket);
662 		this_sg_len -= this_qtd_len;
663 		len -= this_qtd_len;
664 		buf += this_qtd_len;
665 
666 		/*
667 		 * short reads advance to a "magic" dummy instead of the next
668 		 * qtd ... that forces the queue to stop, for manual cleanup.
669 		 * (this will usually be overridden later.)
670 		 */
671 		if (is_input)
672 			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
673 
674 		/* qh makes control packets use qtd toggle; maybe switch it */
675 		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
676 			token ^= QTD_TOGGLE;
677 
678 		if (likely(this_sg_len <= 0)) {
679 			if (--i <= 0 || len <= 0)
680 				break;
681 			sg = sg_next(sg);
682 			buf = sg_dma_address(sg);
683 			this_sg_len = min_t(int, sg_dma_len(sg), len);
684 		}
685 
686 		qtd_prev = qtd;
687 		qtd = ehci_qtd_alloc (ehci, flags);
688 		if (unlikely (!qtd))
689 			goto cleanup;
690 		qtd->urb = urb;
691 		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
692 		list_add_tail (&qtd->qtd_list, head);
693 	}
694 
695 	/*
696 	 * unless the caller requires manual cleanup after short reads,
697 	 * have the alt_next mechanism keep the queue running after the
698 	 * last data qtd (the only one, for control and most other cases).
699 	 */
700 	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
701 				|| usb_pipecontrol (urb->pipe)))
702 		qtd->hw_alt_next = EHCI_LIST_END(ehci);
703 
704 	/*
705 	 * control requests may need a terminating data "status" ack;
706 	 * other OUT ones may need a terminating short packet
707 	 * (zero length).
708 	 */
709 	if (likely (urb->transfer_buffer_length != 0)) {
710 		int	one_more = 0;
711 
712 		if (usb_pipecontrol (urb->pipe)) {
713 			one_more = 1;
714 			token ^= 0x0100;	/* "in" <--> "out"  */
715 			token |= QTD_TOGGLE;	/* force DATA1 */
716 		} else if (usb_pipeout(urb->pipe)
717 				&& (urb->transfer_flags & URB_ZERO_PACKET)
718 				&& !(urb->transfer_buffer_length % maxpacket)) {
719 			one_more = 1;
720 		}
721 		if (one_more) {
722 			qtd_prev = qtd;
723 			qtd = ehci_qtd_alloc (ehci, flags);
724 			if (unlikely (!qtd))
725 				goto cleanup;
726 			qtd->urb = urb;
727 			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
728 			list_add_tail (&qtd->qtd_list, head);
729 
730 			/* never any data in such packets */
731 			qtd_fill(ehci, qtd, 0, 0, token, 0);
732 		}
733 	}
734 
735 	/* by default, enable interrupt on urb completion */
736 	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
737 		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
738 	return head;
739 
740 cleanup:
741 	qtd_list_free (ehci, urb, head);
742 	return NULL;
743 }
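
/*
 * Sketch (illustrative) of the qtd chain built above for a typical
 * control-IN transfer with a short data stage:
 *
 *   [SETUP, 8 bytes] -> [IN data, DATA1] -> [zero-length OUT status, DATA1, IOC]
 *
 * Bulk and interrupt URBs skip the SETUP and status qtds, and large or
 * scatter-gather buffers simply get more data qtds chained in the middle.
 */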
744 
745 /*-------------------------------------------------------------------------*/
746 
747 // Would be best to create all qh's from config descriptors,
748 // when each interface/altsetting is established.  Unlink
749 // any previous qh and cancel its urbs first; endpoints are
750 // implicitly reset then (data toggle too).
751 // That'd mean updating how usbcore talks to HCDs. (2.7?)
752 
753 
754 /*
755  * Each QH holds a qtd list; a QH is used for everything except iso.
756  *
757  * For interrupt urbs, the scheduler must set the microframe scheduling
758  * mask(s) each time the QH gets scheduled.  For highspeed, that's
759  * just one microframe in the s-mask.  For split interrupt transactions
760  * there are additional complications: c-mask, maybe FSTNs.
761  */
762 static struct ehci_qh *
763 qh_make (
764 	struct ehci_hcd		*ehci,
765 	struct urb		*urb,
766 	gfp_t			flags
767 ) {
768 	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
769 	u32			info1 = 0, info2 = 0;
770 	int			is_input, type;
771 	int			maxp = 0;
772 	struct usb_tt		*tt = urb->dev->tt;
773 	struct ehci_qh_hw	*hw;
774 
775 	if (!qh)
776 		return qh;
777 
778 	/*
779 	 * init endpoint/device data for this QH
780 	 */
781 	info1 |= usb_pipeendpoint (urb->pipe) << 8;
782 	info1 |= usb_pipedevice (urb->pipe) << 0;
783 
784 	is_input = usb_pipein (urb->pipe);
785 	type = usb_pipetype (urb->pipe);
786 	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
787 
788 	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
789 	 * acts like up to 3KB, but is built from smaller packets.
790 	 */
791 	if (max_packet(maxp) > 1024) {
792 		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
793 		goto done;
794 	}
795 
796 	/* Compute interrupt scheduling parameters just once, and save.
797 	 * - allowing for high bandwidth, how many nsec/uframe are used?
798 	 * - split transactions need a second CSPLIT uframe; same question
799 	 * - splits also need a schedule gap (for full/low speed I/O)
800 	 * - qh has a polling interval
801 	 *
802 	 * For control/bulk requests, the HC or TT handles these.
803 	 */
804 	if (type == PIPE_INTERRUPT) {
805 		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
806 				is_input, 0,
807 				hb_mult(maxp) * max_packet(maxp)));
808 		qh->start = NO_FRAME;
809 
810 		if (urb->dev->speed == USB_SPEED_HIGH) {
811 			qh->c_usecs = 0;
812 			qh->gap_uf = 0;
813 
814 			qh->period = urb->interval >> 3;
815 			if (qh->period == 0 && urb->interval != 1) {
816 				/* NOTE interval 2 or 4 uframes could work.
817 				 * But interval 1 scheduling is simpler, and
818 				 * includes high bandwidth.
819 				 */
820 				urb->interval = 1;
821 			} else if (qh->period > ehci->periodic_size) {
822 				qh->period = ehci->periodic_size;
823 				urb->interval = qh->period << 3;
824 			}
825 		} else {
826 			int		think_time;
827 
828 			/* gap is f(FS/LS transfer times) */
829 			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
830 					is_input, 0, maxp) / (125 * 1000);
831 
832 			/* FIXME this just approximates SPLIT/CSPLIT times */
833 			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
834 				qh->c_usecs = qh->usecs + HS_USECS (0);
835 				qh->usecs = HS_USECS (1);
836 			} else {		// SPLIT+DATA, gap, CSPLIT
837 				qh->usecs += HS_USECS (1);
838 				qh->c_usecs = HS_USECS (0);
839 			}
840 
841 			think_time = tt ? tt->think_time : 0;
842 			qh->tt_usecs = NS_TO_US (think_time +
843 					usb_calc_bus_time (urb->dev->speed,
844 					is_input, 0, max_packet (maxp)));
845 			qh->period = urb->interval;
846 			if (qh->period > ehci->periodic_size) {
847 				qh->period = ehci->periodic_size;
848 				urb->interval = qh->period;
849 			}
850 		}
851 	}
852 
853 	/* support for tt scheduling, and access to toggles */
854 	qh->dev = urb->dev;
855 
856 	/* using TT? */
857 	switch (urb->dev->speed) {
858 	case USB_SPEED_LOW:
859 		info1 |= QH_LOW_SPEED;
860 		/* FALL THROUGH */
861 
862 	case USB_SPEED_FULL:
863 		/* EPS 0 means "full" */
864 		if (type != PIPE_INTERRUPT)
865 			info1 |= (EHCI_TUNE_RL_TT << 28);
866 		if (type == PIPE_CONTROL) {
867 			info1 |= QH_CONTROL_EP;		/* for TT */
868 			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
869 		}
870 		info1 |= maxp << 16;
871 
872 		info2 |= (EHCI_TUNE_MULT_TT << 30);
873 
874 		/* Some Freescale processors have an erratum in which the
875 		 * port number in the queue head was 0..N-1 instead of 1..N.
876 		 */
877 		if (ehci_has_fsl_portno_bug(ehci))
878 			info2 |= (urb->dev->ttport-1) << 23;
879 		else
880 			info2 |= urb->dev->ttport << 23;
881 
882 		/* set the address of the TT; for TDI's integrated
883 		 * root hub tt, leave it zeroed.
884 		 */
885 		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
886 			info2 |= tt->hub->devnum << 16;
887 
888 		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
889 
890 		break;
891 
892 	case USB_SPEED_HIGH:		/* no TT involved */
893 		info1 |= QH_HIGH_SPEED;
894 		if (type == PIPE_CONTROL) {
895 			info1 |= (EHCI_TUNE_RL_HS << 28);
896 			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
897 			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
898 			info2 |= (EHCI_TUNE_MULT_HS << 30);
899 		} else if (type == PIPE_BULK) {
900 			info1 |= (EHCI_TUNE_RL_HS << 28);
901 			/* The USB spec says that high speed bulk endpoints
902 			 * always use 512 byte maxpacket.  But some device
903 			 * vendors decided to ignore that, and MSFT is happy
904 			 * to help them do so.  So now people expect to use
905 			 * such nonconformant devices with Linux too; sigh.
906 			 */
907 			info1 |= max_packet(maxp) << 16;
908 			info2 |= (EHCI_TUNE_MULT_HS << 30);
909 		} else {		/* PIPE_INTERRUPT */
910 			info1 |= max_packet (maxp) << 16;
911 			info2 |= hb_mult (maxp) << 30;
912 		}
913 		break;
914 	default:
915 		ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
916 			urb->dev->speed);
917 done:
918 		qh_destroy(ehci, qh);
919 		return NULL;
920 	}
921 
922 	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
923 
924 	/* init as live, toggle clear */
925 	qh->qh_state = QH_STATE_IDLE;
926 	hw = qh->hw;
927 	hw->hw_info1 = cpu_to_hc32(ehci, info1);
928 	hw->hw_info2 = cpu_to_hc32(ehci, info2);
929 	qh->is_out = !is_input;
930 	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
931 	return qh;
932 }
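
/*
 * Example (illustrative): for a high-speed bulk endpoint 2-IN on device
 * address 5 with a 512-byte maxpacket, the code above builds
 *   info1 = 5 | (2 << 8) | QH_HIGH_SPEED | (512 << 16) | (EHCI_TUNE_RL_HS << 28)
 *   info2 = EHCI_TUNE_MULT_HS << 30
 * i.e. device address, endpoint number, EPS, maxpacket, NAK reload count and
 * high-bandwidth mult land in the QH dword fields described in the EHCI spec,
 * section 3.6.
 */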
933 
934 /*-------------------------------------------------------------------------*/
935 
936 static void enable_async(struct ehci_hcd *ehci)
937 {
938 	if (ehci->async_count++)
939 		return;
940 
941 	/* Stop waiting to turn off the async schedule */
942 	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
943 
944 	/* Don't start the schedule until ASS is 0 */
945 	ehci_poll_ASS(ehci);
946 	turn_on_io_watchdog(ehci);
947 }
948 
949 static void disable_async(struct ehci_hcd *ehci)
950 {
951 	if (--ehci->async_count)
952 		return;
953 
954 	/* The async schedule and unlink lists are supposed to be empty */
955 	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
956 			!list_empty(&ehci->async_idle));
957 
958 	/* Don't turn off the schedule until ASS is 1 */
959 	ehci_poll_ASS(ehci);
960 }
961 
962 /* move qh (and its qtds) onto async queue; maybe enable queue.  */
963 
964 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
965 {
966 	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
967 	struct ehci_qh	*head;
968 
969 	/* Don't link a QH if there's a Clear-TT-Buffer pending */
970 	if (unlikely(qh->clearing_tt))
971 		return;
972 
973 	WARN_ON(qh->qh_state != QH_STATE_IDLE);
974 
975 	/* clear halt and/or toggle; and maybe recover from silicon quirk */
976 	qh_refresh(ehci, qh);
977 
978 	/* splice right after start */
979 	head = ehci->async;
980 	qh->qh_next = head->qh_next;
981 	qh->hw->hw_next = head->hw->hw_next;
982 	wmb ();
983 
984 	head->qh_next.qh = qh;
985 	head->hw->hw_next = dma;
986 
987 	qh->qh_state = QH_STATE_LINKED;
988 	qh->xacterrs = 0;
989 	qh->exception = 0;
990 	/* qtd completions reported later by interrupt */
991 
992 	enable_async(ehci);
993 }
994 
995 /*-------------------------------------------------------------------------*/
996 
997 /*
998  * For control/bulk/interrupt, return QH with these TDs appended.
999  * Allocates and initializes the QH if necessary.
1000  * Returns null if it can't allocate a QH it needs to.
1001  * If the QH has TDs (urbs) already, that's great.
1002  */
1003 static struct ehci_qh *qh_append_tds (
1004 	struct ehci_hcd		*ehci,
1005 	struct urb		*urb,
1006 	struct list_head	*qtd_list,
1007 	int			epnum,
1008 	void			**ptr
1009 )
1010 {
1011 	struct ehci_qh		*qh = NULL;
1012 	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
1013 
1014 	qh = (struct ehci_qh *) *ptr;
1015 	if (unlikely (qh == NULL)) {
1016 		/* can't sleep here, we have ehci->lock... */
1017 		qh = qh_make (ehci, urb, GFP_ATOMIC);
1018 		*ptr = qh;
1019 	}
1020 	if (likely (qh != NULL)) {
1021 		struct ehci_qtd	*qtd;
1022 
1023 		if (unlikely (list_empty (qtd_list)))
1024 			qtd = NULL;
1025 		else
1026 			qtd = list_entry (qtd_list->next, struct ehci_qtd,
1027 					qtd_list);
1028 
1029 		/* control qh may need patching ... */
1030 		if (unlikely (epnum == 0)) {
1031 
1032 			/* usb_reset_device() briefly reverts to address 0 */
1033 			if (usb_pipedevice (urb->pipe) == 0)
1034 				qh->hw->hw_info1 &= ~qh_addr_mask;
1035 		}
1036 
1037 		/* just one way to queue requests: swap with the dummy qtd.
1038 		 * only hc or qh_refresh() ever modify the overlay.
1039 		 */
1040 		if (likely (qtd != NULL)) {
1041 			struct ehci_qtd		*dummy;
1042 			dma_addr_t		dma;
1043 			__hc32			token;
1044 
1045 			/* to avoid racing the HC, use the dummy td instead of
1046 			 * the first td of our list (becomes new dummy).  both
1047 			 * tds stay deactivated until we're done, when the
1048 			 * HC is allowed to fetch the old dummy (4.10.2).
1049 			 */
1050 			token = qtd->hw_token;
1051 			qtd->hw_token = HALT_BIT(ehci);
1052 
1053 			dummy = qh->dummy;
1054 
1055 			dma = dummy->qtd_dma;
1056 			*dummy = *qtd;
1057 			dummy->qtd_dma = dma;
1058 
1059 			list_del (&qtd->qtd_list);
1060 			list_add (&dummy->qtd_list, qtd_list);
1061 			list_splice_tail(qtd_list, &qh->qtd_list);
1062 
1063 			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1064 			qh->dummy = qtd;
1065 
1066 			/* hc must see the new dummy at list end */
1067 			dma = qtd->qtd_dma;
1068 			qtd = list_entry (qh->qtd_list.prev,
1069 					struct ehci_qtd, qtd_list);
1070 			qtd->hw_next = QTD_NEXT(ehci, dma);
1071 
1072 			/* let the hc process these next qtds */
1073 			wmb ();
1074 			dummy->hw_token = token;
1075 
1076 			urb->hcpriv = qh;
1077 		}
1078 	}
1079 	return qh;
1080 }
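
/*
 * Sketch (illustrative) of the dummy-qtd swap above.  Suppose the queue
 * already holds qtd A followed by the halted dummy D, and the new list is
 * X -> Y:
 *
 *   before:  A -> D (dummy, halted)             new list:  X -> Y
 *   after:   A -> D (now holds X's contents) -> Y -> X (new halted dummy)
 *
 * D's token is written last, after the wmb(), so the HC only ever sees an
 * inactive qtd appear at the end of the queue (EHCI spec 4.10.2).
 */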
1081 
1082 /*-------------------------------------------------------------------------*/
1083 
1084 static int
1085 submit_async (
1086 	struct ehci_hcd		*ehci,
1087 	struct urb		*urb,
1088 	struct list_head	*qtd_list,
1089 	gfp_t			mem_flags
1090 ) {
1091 	int			epnum;
1092 	unsigned long		flags;
1093 	struct ehci_qh		*qh = NULL;
1094 	int			rc;
1095 
1096 	epnum = urb->ep->desc.bEndpointAddress;
1097 
1098 #ifdef EHCI_URB_TRACE
1099 	{
1100 		struct ehci_qtd *qtd;
1101 		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
1102 		ehci_dbg(ehci,
1103 			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1104 			 __func__, urb->dev->devpath, urb,
1105 			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
1106 			 urb->transfer_buffer_length,
1107 			 qtd, urb->ep->hcpriv);
1108 	}
1109 #endif
1110 
1111 	spin_lock_irqsave (&ehci->lock, flags);
1112 	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1113 		rc = -ESHUTDOWN;
1114 		goto done;
1115 	}
1116 	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1117 	if (unlikely(rc))
1118 		goto done;
1119 
1120 	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
1121 	if (unlikely(qh == NULL)) {
1122 		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1123 		rc = -ENOMEM;
1124 		goto done;
1125 	}
1126 
1127 	/* Control/bulk operations through TTs don't need scheduling,
1128 	 * the HC and TT handle it when the TT has a buffer ready.
1129 	 */
1130 	if (likely (qh->qh_state == QH_STATE_IDLE))
1131 		qh_link_async(ehci, qh);
1132  done:
1133 	spin_unlock_irqrestore (&ehci->lock, flags);
1134 	if (unlikely (qh == NULL))
1135 		qtd_list_free (ehci, urb, qtd_list);
1136 	return rc;
1137 }
1138 
1139 /*-------------------------------------------------------------------------*/
1140 #ifdef CONFIG_USB_HCD_TEST_MODE
1141 /*
1142  * This function creates the qtds and submits them for the
1143  * SINGLE_STEP_SET_FEATURE Test.
1144  * This is done in two parts: first the SETUP request for GetDescriptor is
1145  * sent; 15 seconds later, the IN stage for GetDescriptor requests the data
1146  * from the device.
1147  *
1148  * is_setup : input argument deciding which of the two stages is performed;
1149  * TRUE - SETUP and FALSE - IN+STATUS.  Returns 0 on success.
1150  */
1151 static int submit_single_step_set_feature(
1152 	struct usb_hcd  *hcd,
1153 	struct urb      *urb,
1154 	int             is_setup
1155 ) {
1156 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
1157 	struct list_head	qtd_list;
1158 	struct list_head	*head;
1159 
1160 	struct ehci_qtd		*qtd, *qtd_prev;
1161 	dma_addr_t		buf;
1162 	int			len, maxpacket;
1163 	u32			token;
1164 
1165 	INIT_LIST_HEAD(&qtd_list);
1166 	head = &qtd_list;
1167 
1168 	/* URBs map to sequences of QTDs:  one logical transaction */
1169 	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
1170 	if (unlikely(!qtd))
1171 		return -1;
1172 	list_add_tail(&qtd->qtd_list, head);
1173 	qtd->urb = urb;
1174 
1175 	token = QTD_STS_ACTIVE;
1176 	token |= (EHCI_TUNE_CERR << 10);
1177 
1178 	len = urb->transfer_buffer_length;
1179 	/*
1180 	 * Check if the request is to perform just the SETUP stage (GetDesc);
1181 	 * as in the SINGLE_STEP_SET_FEATURE test, the DATA stage (IN) happens
1182 	 * 15 seconds after the SETUP
1183 	 */
1184 	if (is_setup) {
1185 		/* SETUP pid */
1186 		qtd_fill(ehci, qtd, urb->setup_dma,
1187 				sizeof(struct usb_ctrlrequest),
1188 				token | (2 /* "setup" */ << 8), 8);
1189 
1190 		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
1191 		return 0; /* Return now; we shall come back after 15 seconds */
1192 	}
1193 
1194 	/*
1195 	 * IN: data transfer stage: buffer setup: start the IN transaction phase
1196 	 * for the GetDesc SETUP which was sent 15 seconds ago
1197 	 */
1198 	token ^= QTD_TOGGLE;   /* we need to start IN with the DATA1 PID sequence */
1199 	buf = urb->transfer_dma;
1200 
1201 	token |= (1 /* "in" */ << 8);  /* this is the IN stage */
1202 
1203 	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
1204 
1205 	qtd_fill(ehci, qtd, buf, len, token, maxpacket);
1206 
1207 	/*
1208 	 * Our IN phase shall always be a short read; so keep the queue running
1209 	 * and let it advance to the next qtd, the zero-length OUT status
1210 	 */
1211 	qtd->hw_alt_next = EHCI_LIST_END(ehci);
1212 
1213 	/* STATUS stage for GetDesc control request */
1214 	token ^= 0x0100;        /* "in" <--> "out"  */
1215 	token |= QTD_TOGGLE;    /* force DATA1 */
1216 
1217 	qtd_prev = qtd;
1218 	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
1219 	if (unlikely(!qtd))
1220 		goto cleanup;
1221 	qtd->urb = urb;
1222 	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
1223 	list_add_tail(&qtd->qtd_list, head);
1224 
1225 	/* don't fill any data in such packets */
1226 	qtd_fill(ehci, qtd, 0, 0, token, 0);
1227 
1228 	/* by default, enable interrupt on urb completion */
1229 	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
1230 		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
1231 
1232 	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
1233 
1234 	return 0;
1235 
1236 cleanup:
1237 	qtd_list_free(ehci, urb, head);
1238 	return -1;
1239 }
1240 #endif /* CONFIG_USB_HCD_TEST_MODE */
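
/*
 * Usage sketch (illustrative only; the real caller lives in the EHSET test
 * support code and handles URB setup and completion):
 *
 *	submit_single_step_set_feature(hcd, urb, 1);	-> SETUP stage only
 *	... wait 15 seconds with the bus state held ...
 *	submit_single_step_set_feature(hcd, urb, 0);	-> IN data + STATUS stages
 */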
1241 
1242 /*-------------------------------------------------------------------------*/
1243 
1244 static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1245 {
1246 	struct ehci_qh		*prev;
1247 
1248 	/* Add to the end of the list of QHs waiting for the next IAAD */
1249 	qh->qh_state = QH_STATE_UNLINK_WAIT;
1250 	list_add_tail(&qh->unlink_node, &ehci->async_unlink);
1251 
1252 	/* Unlink it from the schedule */
1253 	prev = ehci->async;
1254 	while (prev->qh_next.qh != qh)
1255 		prev = prev->qh_next.qh;
1256 
1257 	prev->hw->hw_next = qh->hw->hw_next;
1258 	prev->qh_next = qh->qh_next;
1259 	if (ehci->qh_scan_next == qh)
1260 		ehci->qh_scan_next = qh->qh_next.qh;
1261 }
1262 
1263 static void start_iaa_cycle(struct ehci_hcd *ehci)
1264 {
1265 	/* Do nothing if an IAA cycle is already running */
1266 	if (ehci->iaa_in_progress)
1267 		return;
1268 	ehci->iaa_in_progress = true;
1269 
1270 	/* If the controller isn't running, we don't have to wait for it */
1271 	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
1272 		end_unlink_async(ehci);
1273 
1274 	/* Otherwise start a new IAA cycle */
1275 	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
1276 
1277 		/* Make sure the unlinks are all visible to the hardware */
1278 		wmb();
1279 
1280 		ehci_writel(ehci, ehci->command | CMD_IAAD,
1281 				&ehci->regs->command);
1282 		ehci_readl(ehci, &ehci->regs->command);
1283 		ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
1284 	}
1285 }
1286 
1287 /* the async qh for the qtds being unlinked is now gone from the HC */
1288 
1289 static void end_unlink_async(struct ehci_hcd *ehci)
1290 {
1291 	struct ehci_qh		*qh;
1292 	bool			early_exit;
1293 
1294 	if (ehci->has_synopsys_hc_bug)
1295 		ehci_writel(ehci, (u32) ehci->async->qh_dma,
1296 			    &ehci->regs->async_next);
1297 
1298 	/* The current IAA cycle has ended */
1299 	ehci->iaa_in_progress = false;
1300 
1301 	if (list_empty(&ehci->async_unlink))
1302 		return;
1303 	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
1304 			unlink_node);	/* QH whose IAA cycle just ended */
1305 
1306 	/*
1307 	 * If async_unlinking is set then this routine is already running,
1308 	 * either on the stack or on another CPU.
1309 	 */
1310 	early_exit = ehci->async_unlinking;
1311 
1312 	/* If the controller isn't running, process all the waiting QHs */
1313 	if (ehci->rh_state < EHCI_RH_RUNNING)
1314 		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
1315 
1316 	/*
1317 	 * Intel (?) bug: The HC can write back the overlay region even
1318 	 * after the IAA interrupt occurs.  In self-defense, always go
1319 	 * through two IAA cycles for each QH.
1320 	 */
1321 	else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
1322 		qh->qh_state = QH_STATE_UNLINK;
1323 		early_exit = true;
1324 	}
1325 
1326 	/* Otherwise process only the first waiting QH (NVIDIA bug?) */
1327 	else
1328 		list_move_tail(&qh->unlink_node, &ehci->async_idle);
1329 
1330 	/* Start a new IAA cycle if any QHs are waiting for it */
1331 	if (!list_empty(&ehci->async_unlink))
1332 		start_iaa_cycle(ehci);
1333 
1334 	/*
1335 	 * Don't allow nesting or concurrent calls,
1336 	 * or wait for the second IAA cycle for the next QH.
1337 	 */
1338 	if (early_exit)
1339 		return;
1340 
1341 	/* Process the idle QHs */
1342 	ehci->async_unlinking = true;
1343 	while (!list_empty(&ehci->async_idle)) {
1344 		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
1345 				unlink_node);
1346 		list_del(&qh->unlink_node);
1347 
1348 		qh->qh_state = QH_STATE_IDLE;
1349 		qh->qh_next.qh = NULL;
1350 
1351 		if (!list_empty(&qh->qtd_list))
1352 			qh_completions(ehci, qh);
1353 		if (!list_empty(&qh->qtd_list) &&
1354 				ehci->rh_state == EHCI_RH_RUNNING)
1355 			qh_link_async(ehci, qh);
1356 		disable_async(ehci);
1357 	}
1358 	ehci->async_unlinking = false;
1359 }
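
/*
 * Informal life cycle of an async unlink, as implemented above (two IAA
 * cycles per QH while the controller is running):
 *
 *   LINKED -> single_unlink_async() -> UNLINK_WAIT
 *          -> first IAA completes   -> UNLINK
 *          -> second IAA completes  -> moved to async_idle -> IDLE,
 *             qh_completions() runs and the QH may be relinked
 *
 * The double IAA pass guards against controllers that write back the QH
 * overlay even after the first IAA interrupt has been raised.
 */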
1360 
1361 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
1362 
1363 static void unlink_empty_async(struct ehci_hcd *ehci)
1364 {
1365 	struct ehci_qh		*qh;
1366 	struct ehci_qh		*qh_to_unlink = NULL;
1367 	int			count = 0;
1368 
1369 	/* Find the last async QH which has been empty for a timer cycle */
1370 	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
1371 		if (list_empty(&qh->qtd_list) &&
1372 				qh->qh_state == QH_STATE_LINKED) {
1373 			++count;
1374 			if (qh->unlink_cycle != ehci->async_unlink_cycle)
1375 				qh_to_unlink = qh;
1376 		}
1377 	}
1378 
1379 	/* If nothing else is being unlinked, unlink the last empty QH */
1380 	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
1381 		start_unlink_async(ehci, qh_to_unlink);
1382 		--count;
1383 	}
1384 
1385 	/* Other QHs will be handled later */
1386 	if (count > 0) {
1387 		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1388 		++ehci->async_unlink_cycle;
1389 	}
1390 }
1391 
1392 /* The root hub is suspended; unlink all the async QHs */
1393 static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
1394 {
1395 	struct ehci_qh		*qh;
1396 
1397 	while (ehci->async->qh_next.qh) {
1398 		qh = ehci->async->qh_next.qh;
1399 		WARN_ON(!list_empty(&qh->qtd_list));
1400 		single_unlink_async(ehci, qh);
1401 	}
1402 	start_iaa_cycle(ehci);
1403 }
1404 
1405 /* makes sure the async qh will become idle */
1406 /* caller must own ehci->lock */
1407 
1408 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1409 {
1410 	/* If the QH isn't linked then there's nothing we can do. */
1411 	if (qh->qh_state != QH_STATE_LINKED)
1412 		return;
1413 
1414 	single_unlink_async(ehci, qh);
1415 	start_iaa_cycle(ehci);
1416 }
1417 
1418 /*-------------------------------------------------------------------------*/
1419 
1420 static void scan_async (struct ehci_hcd *ehci)
1421 {
1422 	struct ehci_qh		*qh;
1423 	bool			check_unlinks_later = false;
1424 
1425 	ehci->qh_scan_next = ehci->async->qh_next.qh;
1426 	while (ehci->qh_scan_next) {
1427 		qh = ehci->qh_scan_next;
1428 		ehci->qh_scan_next = qh->qh_next.qh;
1429 
1430 		/* clean any finished work for this qh */
1431 		if (!list_empty(&qh->qtd_list)) {
1432 			int temp;
1433 
1434 			/*
1435 			 * Unlinks could happen here; completion reporting
1436 			 * drops the lock.  That's why ehci->qh_scan_next
1437 			 * always holds the next qh to scan; if the next qh
1438 			 * gets unlinked then ehci->qh_scan_next is adjusted
1439 			 * in single_unlink_async().
1440 			 */
1441 			temp = qh_completions(ehci, qh);
1442 			if (unlikely(temp)) {
1443 				start_unlink_async(ehci, qh);
1444 			} else if (list_empty(&qh->qtd_list)
1445 					&& qh->qh_state == QH_STATE_LINKED) {
1446 				qh->unlink_cycle = ehci->async_unlink_cycle;
1447 				check_unlinks_later = true;
1448 			}
1449 		}
1450 	}
1451 
1452 	/*
1453 	 * Unlink empty entries, reducing DMA usage as well
1454 	 * as HCD schedule-scanning costs.  Delay for any qh
1455 	 * we just scanned; there's a not-unusual case that it
1456 	 * doesn't stay idle for long.
1457 	 */
1458 	if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
1459 			!(ehci->enabled_hrtimer_events &
1460 				BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
1461 		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1462 		++ehci->async_unlink_cycle;
1463 	}
1464 }
1465