/* openbmc/linux: drivers/usb/host/ehci-q.c (revision 63dc02bd) */
1 /*
2  * Copyright (C) 2001-2004 by David Brownell
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License as published by the
6  * Free Software Foundation; either version 2 of the License, or (at your
7  * option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18 
19 /* this file is part of ehci-hcd.c */
20 
21 /*-------------------------------------------------------------------------*/
22 
23 /*
24  * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
25  *
26  * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
27  * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
28  * buffers needed for the larger number).  We use one QH per endpoint, queue
29  * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
30  *
31  * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
32  * interrupts) needs careful scheduling.  Performance improvements can be
33  * an ongoing challenge.  That's in "ehci-sched.c".
34  *
35  * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
36  * or otherwise through transaction translators (TTs) in USB 2.0 hubs, using
37  * either (b) special fields in qh entries or (c) split iso entries.  TTs will
38  * buffer low/full speed data so the host collects it at high speed.
39  */
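
/*
 * Rough roadmap of this file: qtd_fill() and qh_make() build the hardware
 * data structures; qh_urb_transaction(), qh_append_tds() and submit_async()
 * turn URBs into queued qtds; qh_completions() and scan_async() harvest
 * finished work and give URBs back; start_unlink_async() and
 * end_unlink_async() take QHs off the async schedule.
 */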
40 
41 /*-------------------------------------------------------------------------*/
42 
43 /* fill a qtd, returning how much of the buffer we were able to queue up */
44 
45 static int
46 qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
47 		  size_t len, int token, int maxpacket)
48 {
49 	int	i, count;
50 	u64	addr = buf;
51 
52 	/* one buffer entry per 4K ... first might be short or unaligned */
53 	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
54 	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
55 	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
56 	if (likely (len < count))		/* ... iff needed */
57 		count = len;
58 	else {
59 		buf +=  0x1000;
60 		buf &= ~0x0fff;
61 
62 		/* per-qtd limit: from 16K to 20K (best alignment) */
63 		for (i = 1; count < len && i < 5; i++) {
64 			addr = buf;
65 			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
66 			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
67 					(u32)(addr >> 32));
68 			buf += 0x1000;
69 			if ((count + 0x1000) < len)
70 				count += 0x1000;
71 			else
72 				count = len;
73 		}
74 
75 		/* short packets may only terminate transfers */
76 		if (count != len)
77 			count -= (count % maxpacket);
78 	}
79 	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
80 	qtd->length = count;
81 
82 	return count;
83 }
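
/*
 * Worked example for the math above: with five 4 KB buffer pointers, a
 * page-aligned buffer lets one qtd carry up to 5 * 4096 = 20480 bytes,
 * while a worst-case unaligned buffer (starting on the last byte of a
 * page) still gets 1 + 4 * 4096 = 16385 bytes -- hence the "16-20kB/entry"
 * figure in the header comment.  When the whole transfer doesn't fit,
 * count is rounded down to a multiple of maxpacket so a short packet can
 * only ever end the transfer.
 */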
84 
85 /*-------------------------------------------------------------------------*/
86 
87 static inline void
88 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
89 {
90 	struct ehci_qh_hw *hw = qh->hw;
91 
92 	/* writes to an active overlay are unsafe */
93 	BUG_ON(qh->qh_state != QH_STATE_IDLE);
94 
95 	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
96 	hw->hw_alt_next = EHCI_LIST_END(ehci);
97 
98 	/* Except for control endpoints, we make hardware maintain data
99 	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
100 	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
101 	 * ever clear it.
102 	 */
103 	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
104 		unsigned	is_out, epnum;
105 
106 		is_out = qh->is_out;
107 		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
108 		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
109 			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
110 			usb_settoggle (qh->dev, epnum, is_out, 1);
111 		}
112 	}
113 
114 	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
115 }
116 
117 /* if it weren't for a common silicon quirk (writing the dummy into the qh
118  * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
119  * recovery (including urb dequeue) would need software changes to a QH...
120  */
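/*
 * Reload the QH's transfer overlay from the first unprocessed qtd (or from
 * the dummy qtd when the queue is empty), so the HC resumes from the right
 * place after fault cleanup or after the overlay was clobbered by the quirk
 * described above.  If the overlay already points at the first qtd, the
 * hardware state is current and nothing is rewritten.
 */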
121 static void
122 qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
123 {
124 	struct ehci_qtd *qtd;
125 
126 	if (list_empty (&qh->qtd_list))
127 		qtd = qh->dummy;
128 	else {
129 		qtd = list_entry (qh->qtd_list.next,
130 				struct ehci_qtd, qtd_list);
131 		/* first qtd may already be partially processed */
132 		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
133 			qtd = NULL;
134 	}
135 
136 	if (qtd)
137 		qh_update (ehci, qh, qtd);
138 }
139 
140 /*-------------------------------------------------------------------------*/
141 
142 static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
143 
144 static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
145 		struct usb_host_endpoint *ep)
146 {
147 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
148 	struct ehci_qh		*qh = ep->hcpriv;
149 	unsigned long		flags;
150 
151 	spin_lock_irqsave(&ehci->lock, flags);
152 	qh->clearing_tt = 0;
153 	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
154 			&& ehci->rh_state == EHCI_RH_RUNNING)
155 		qh_link_async(ehci, qh);
156 	spin_unlock_irqrestore(&ehci->lock, flags);
157 }
158 
159 static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
160 		struct urb *urb, u32 token)
161 {
162 
163 	/* If an async split transaction gets an error or is unlinked,
164 	 * the TT buffer may be left in an indeterminate state.  We
165 	 * have to clear the TT buffer.
166 	 *
167 	 * Note: this routine is never called for Isochronous transfers.
168 	 */
169 	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
170 #ifdef DEBUG
171 		struct usb_device *tt = urb->dev->tt->hub;
172 		dev_dbg(&tt->dev,
173 			"clear tt buffer port %d, a%d ep%d t%08x\n",
174 			urb->dev->ttport, urb->dev->devnum,
175 			usb_pipeendpoint(urb->pipe), token);
176 #endif /* DEBUG */
177 		if (!ehci_is_TDI(ehci)
178 				|| urb->dev->tt->hub !=
179 				   ehci_to_hcd(ehci)->self.root_hub) {
180 			if (usb_hub_clear_tt_buffer(urb) == 0)
181 				qh->clearing_tt = 1;
182 		} else {
183 
184 			/* REVISIT ARC-derived cores don't clear the root
185 			 * hub TT buffer in this way...
186 			 */
187 		}
188 	}
189 }
190 
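/* Translate a completed qtd's token into an URB status.  Roughly:
 *
 *	babble				-EOVERFLOW
 *	halt with CERR nonzero		-EPIPE (stall)
 *	missed microframe		-EPROTO
 *	data buffer error		-ENOSR (IN) or -ECOMM (OUT)
 *	transaction error		-EPROTO (retries exhausted)
 *	short read			-EREMOTEIO (not necessarily an error)
 *
 * It also credits the bytes actually transferred to urb->actual_length.
 */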
191 static int qtd_copy_status (
192 	struct ehci_hcd *ehci,
193 	struct urb *urb,
194 	size_t length,
195 	u32 token
196 )
197 {
198 	int	status = -EINPROGRESS;
199 
200 	/* count IN/OUT bytes, not SETUP (even short packets) */
201 	if (likely (QTD_PID (token) != 2))
202 		urb->actual_length += length - QTD_LENGTH (token);
203 
204 	/* don't modify error codes */
205 	if (unlikely(urb->unlinked))
206 		return status;
207 
208 	/* force cleanup after short read; not always an error */
209 	if (unlikely (IS_SHORT_READ (token)))
210 		status = -EREMOTEIO;
211 
212 	/* serious "can't proceed" faults reported by the hardware */
213 	if (token & QTD_STS_HALT) {
214 		if (token & QTD_STS_BABBLE) {
215 			/* FIXME "must" disable babbling device's port too */
216 			status = -EOVERFLOW;
217 		/* CERR nonzero + halt --> stall */
218 		} else if (QTD_CERR(token)) {
219 			status = -EPIPE;
220 
221 		/* In theory, more than one of the following bits can be set
222 		 * since they are sticky and the transaction is retried.
223 		 * Which to test first is rather arbitrary.
224 		 */
225 		} else if (token & QTD_STS_MMF) {
226 			/* fs/ls interrupt xfer missed the complete-split */
227 			status = -EPROTO;
228 		} else if (token & QTD_STS_DBE) {
229 			status = (QTD_PID (token) == 1) /* IN ? */
230 				? -ENOSR  /* hc couldn't read data */
231 				: -ECOMM; /* hc couldn't write data */
232 		} else if (token & QTD_STS_XACT) {
233 			/* timeout, bad CRC, wrong PID, etc */
234 			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
235 				urb->dev->devpath,
236 				usb_pipeendpoint(urb->pipe),
237 				usb_pipein(urb->pipe) ? "in" : "out");
238 			status = -EPROTO;
239 		} else {	/* unknown */
240 			status = -EPROTO;
241 		}
242 
243 		ehci_vdbg (ehci,
244 			"dev%d ep%d%s qtd token %08x --> status %d\n",
245 			usb_pipedevice (urb->pipe),
246 			usb_pipeendpoint (urb->pipe),
247 			usb_pipein (urb->pipe) ? "in" : "out",
248 			token, status);
249 	}
250 
251 	return status;
252 }
253 
254 static void
255 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
256 __releases(ehci->lock)
257 __acquires(ehci->lock)
258 {
259 	if (likely (urb->hcpriv != NULL)) {
260 		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
261 
262 		/* S-mask in a QH means it's an interrupt urb */
263 		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
264 
265 			/* ... update hc-wide periodic stats (for usbfs) */
266 			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
267 		}
268 		qh_put (qh);
269 	}
270 
271 	if (unlikely(urb->unlinked)) {
272 		COUNT(ehci->stats.unlink);
273 	} else {
274 		/* report non-error and short read status as zero */
275 		if (status == -EINPROGRESS || status == -EREMOTEIO)
276 			status = 0;
277 		COUNT(ehci->stats.complete);
278 	}
279 
280 #ifdef EHCI_URB_TRACE
281 	ehci_dbg (ehci,
282 		"%s %s urb %p ep%d%s status %d len %d/%d\n",
283 		__func__, urb->dev->devpath, urb,
284 		usb_pipeendpoint (urb->pipe),
285 		usb_pipein (urb->pipe) ? "in" : "out",
286 		status,
287 		urb->actual_length, urb->transfer_buffer_length);
288 #endif
289 
290 	/* complete() can reenter this HCD */
291 	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
292 	spin_unlock (&ehci->lock);
293 	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
294 	spin_lock (&ehci->lock);
295 }
296 
297 static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
298 static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
299 
300 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
301 
302 /*
303  * Process and free completed qtds for a qh, returning URBs to drivers.
304  * Chases up to qh->hw_current.  Returns number of completions called,
305  * indicating how much "real" work we did.
306  */
307 static unsigned
308 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
309 {
310 	struct ehci_qtd		*last, *end = qh->dummy;
311 	struct list_head	*entry, *tmp;
312 	int			last_status;
313 	int			stopped;
314 	unsigned		count = 0;
315 	u8			state;
316 	struct ehci_qh_hw	*hw = qh->hw;
317 
318 	if (unlikely (list_empty (&qh->qtd_list)))
319 		return count;
320 
321 	/* completions (or tasks on other cpus) must never clobber HALT
322 	 * till we've gone through and cleaned everything up, even when
323 	 * they add urbs to this qh's queue or mark them for unlinking.
324 	 *
325 	 * NOTE:  unlinking expects to be done in queue order.
326 	 *
327 	 * It's a bug for qh->qh_state to be anything other than
328 	 * QH_STATE_IDLE, unless our caller is scan_async() or
329 	 * scan_periodic().
330 	 */
331 	state = qh->qh_state;
332 	qh->qh_state = QH_STATE_COMPLETING;
333 	stopped = (state == QH_STATE_IDLE);
334 
335  rescan:
336 	last = NULL;
337 	last_status = -EINPROGRESS;
338 	qh->needs_rescan = 0;
339 
340 	/* remove de-activated QTDs from front of queue.
341 	 * after faults (including short reads), cleanup this urb
342 	 * then let the queue advance.
343 	 * if queue is stopped, handles unlinks.
344 	 */
345 	list_for_each_safe (entry, tmp, &qh->qtd_list) {
346 		struct ehci_qtd	*qtd;
347 		struct urb	*urb;
348 		u32		token = 0;
349 
350 		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
351 		urb = qtd->urb;
352 
353 		/* clean up any state from previous QTD ...*/
354 		if (last) {
355 			if (likely (last->urb != urb)) {
356 				ehci_urb_done(ehci, last->urb, last_status);
357 				count++;
358 				last_status = -EINPROGRESS;
359 			}
360 			ehci_qtd_free (ehci, last);
361 			last = NULL;
362 		}
363 
364 		/* ignore urbs submitted during completions we reported */
365 		if (qtd == end)
366 			break;
367 
368 		/* hardware copies qtd out of qh overlay */
369 		rmb ();
370 		token = hc32_to_cpu(ehci, qtd->hw_token);
371 
372 		/* always clean up qtds the hc de-activated */
373  retry_xacterr:
374 		if ((token & QTD_STS_ACTIVE) == 0) {
375 
376 			/* Report Data Buffer Error: non-fatal but useful */
377 			if (token & QTD_STS_DBE)
378 				ehci_dbg(ehci,
379 					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
380 					urb,
381 					usb_endpoint_num(&urb->ep->desc),
382 					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
383 					urb->transfer_buffer_length,
384 					qtd,
385 					qh);
386 
387 			/* on STALL, error, and short reads this urb must
388 			 * complete and all its qtds must be recycled.
389 			 */
390 			if ((token & QTD_STS_HALT) != 0) {
391 
392 				/* retry transaction errors until we
393 				 * reach the software xacterr limit
394 				 */
395 				if ((token & QTD_STS_XACT) &&
396 						QTD_CERR(token) == 0 &&
397 						++qh->xacterrs < QH_XACTERR_MAX &&
398 						!urb->unlinked) {
399 					ehci_dbg(ehci,
400 	"detected XactErr len %zu/%zu retry %d\n",
401 	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
402 
403 					/* reset the token in the qtd and the
404 					 * qh overlay (which still contains
405 					 * the qtd) so that we pick up from
406 					 * where we left off
407 					 */
408 					token &= ~QTD_STS_HALT;
409 					token |= QTD_STS_ACTIVE |
410 							(EHCI_TUNE_CERR << 10);
411 					qtd->hw_token = cpu_to_hc32(ehci,
412 							token);
413 					wmb();
414 					hw->hw_token = cpu_to_hc32(ehci,
415 							token);
416 					goto retry_xacterr;
417 				}
418 				stopped = 1;
419 
420 			/* magic dummy for some short reads; qh won't advance.
421 			 * that silicon quirk can kick in with this dummy too.
422 			 *
423 			 * other short reads won't stop the queue, including
424 			 * control transfers (status stage handles that) or
425 			 * most other single-qtd reads ... the queue stops if
426 			 * URB_SHORT_NOT_OK was set so the driver submitting
427 			 * the urbs could clean it up.
428 			 */
429 			} else if (IS_SHORT_READ (token)
430 					&& !(qtd->hw_alt_next
431 						& EHCI_LIST_END(ehci))) {
432 				stopped = 1;
433 			}
434 
435 		/* stop scanning when we reach qtds the hc is using */
436 		} else if (likely (!stopped
437 				&& ehci->rh_state == EHCI_RH_RUNNING)) {
438 			break;
439 
440 		/* scan the whole queue for unlinks whenever it stops */
441 		} else {
442 			stopped = 1;
443 
444 			/* cancel everything if we halt, suspend, etc */
445 			if (ehci->rh_state != EHCI_RH_RUNNING)
446 				last_status = -ESHUTDOWN;
447 
448 			/* this qtd is active; skip it unless a previous qtd
449 			 * for its urb faulted, or its urb was canceled.
450 			 */
451 			else if (last_status == -EINPROGRESS && !urb->unlinked)
452 				continue;
453 
454 			/* qh unlinked; token in overlay may be most current */
455 			if (state == QH_STATE_IDLE
456 					&& cpu_to_hc32(ehci, qtd->qtd_dma)
457 						== hw->hw_current) {
458 				token = hc32_to_cpu(ehci, hw->hw_token);
459 
460 				/* An unlink may leave an incomplete
461 				 * async transaction in the TT buffer.
462 				 * We have to clear it.
463 				 */
464 				ehci_clear_tt_buffer(ehci, qh, urb, token);
465 			}
466 		}
467 
468 		/* unless we already know the urb's status, collect qtd status
469 		 * and update count of bytes transferred.  in common short read
470 		 * cases with only one data qtd (including control transfers),
471 		 * queue processing won't halt.  but with two or more qtds (for
472 		 * example, with a 32 KB transfer), when the first qtd gets a
473 		 * short read the second must be removed by hand.
474 		 */
475 		if (last_status == -EINPROGRESS) {
476 			last_status = qtd_copy_status(ehci, urb,
477 					qtd->length, token);
478 			if (last_status == -EREMOTEIO
479 					&& (qtd->hw_alt_next
480 						& EHCI_LIST_END(ehci)))
481 				last_status = -EINPROGRESS;
482 
483 			/* As part of low/full-speed endpoint-halt processing
484 			 * we must clear the TT buffer (11.17.5).
485 			 */
486 			if (unlikely(last_status != -EINPROGRESS &&
487 					last_status != -EREMOTEIO)) {
488 				/* The TT's in some hubs malfunction when they
489 				 * receive this request following a STALL (they
490 				 * stop sending isochronous packets).  Since a
491 				 * STALL can't leave the TT buffer in a busy
492 				 * state (if you believe Figures 11-48 - 11-51
493 				 * in the USB 2.0 spec), we won't clear the TT
494 				 * buffer in this case.  Strictly speaking this
495 				 * is a violation of the spec.
496 				 */
497 				if (last_status != -EPIPE)
498 					ehci_clear_tt_buffer(ehci, qh, urb,
499 							token);
500 			}
501 		}
502 
503 		/* if we're removing something not at the queue head,
504 		 * patch the hardware queue pointer.
505 		 */
506 		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
507 			last = list_entry (qtd->qtd_list.prev,
508 					struct ehci_qtd, qtd_list);
509 			last->hw_next = qtd->hw_next;
510 		}
511 
512 		/* remove qtd; it's recycled after possible urb completion */
513 		list_del (&qtd->qtd_list);
514 		last = qtd;
515 
516 		/* reinit the xacterr counter for the next qtd */
517 		qh->xacterrs = 0;
518 	}
519 
520 	/* last urb's completion might still need calling */
521 	if (likely (last != NULL)) {
522 		ehci_urb_done(ehci, last->urb, last_status);
523 		count++;
524 		ehci_qtd_free (ehci, last);
525 	}
526 
527 	/* Do we need to rescan for URBs dequeued during a giveback? */
528 	if (unlikely(qh->needs_rescan)) {
529 		/* If the QH is already unlinked, do the rescan now. */
530 		if (state == QH_STATE_IDLE)
531 			goto rescan;
532 
533 		/* Otherwise we have to wait until the QH is fully unlinked.
534 		 * Our caller will start an unlink if qh->needs_rescan is
535 		 * set.  But if an unlink has already started, nothing needs
536 		 * to be done.
537 		 */
538 		if (state != QH_STATE_LINKED)
539 			qh->needs_rescan = 0;
540 	}
541 
542 	/* restore original state; caller must unlink or relink */
543 	qh->qh_state = state;
544 
545 	/* be sure the hardware's done with the qh before refreshing
546 	 * it after fault cleanup, or recovering from silicon wrongly
547 	 * overlaying the dummy qtd (which reduces DMA chatter).
548 	 */
549 	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
550 		switch (state) {
551 		case QH_STATE_IDLE:
552 			qh_refresh(ehci, qh);
553 			break;
554 		case QH_STATE_LINKED:
555 			/* We won't refresh a QH that's linked (after the HC
556 			 * stopped the queue).  That avoids a race:
557 			 *  - HC reads first part of QH;
558 			 *  - CPU updates that first part and the token;
559 			 *  - HC reads rest of that QH, including token
560 			 * Result:  HC gets an inconsistent image, and then
561 			 * DMAs to/from the wrong memory (corrupting it).
562 			 *
563 			 * That should be rare for interrupt transfers,
564 			 * except maybe high bandwidth ...
565 			 */
566 
567 			/* Tell the caller to start an unlink */
568 			qh->needs_rescan = 1;
569 			break;
570 		/* otherwise, unlink already started */
571 		}
572 	}
573 
574 	return count;
575 }
576 
577 /*-------------------------------------------------------------------------*/
578 
579 // high bandwidth multiplier, as encoded in highspeed endpoint descriptors
580 #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
581 // ... and packet size, for any kind of endpoint descriptor
582 #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
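/* Example: wMaxPacketSize 0x1400 decodes as max_packet() = 1024 bytes with
 * hb_mult() = 3 transactions per microframe (a high bandwidth endpoint).
 */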
583 
584 /*
585  * reverse of qh_urb_transaction:  free a list of TDs.
586  * used for cleanup after errors, before HC sees an URB's TDs.
587  */
588 static void qtd_list_free (
589 	struct ehci_hcd		*ehci,
590 	struct urb		*urb,
591 	struct list_head	*qtd_list
592 ) {
593 	struct list_head	*entry, *temp;
594 
595 	list_for_each_safe (entry, temp, qtd_list) {
596 		struct ehci_qtd	*qtd;
597 
598 		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
599 		list_del (&qtd->qtd_list);
600 		ehci_qtd_free (ehci, qtd);
601 	}
602 }
603 
604 /*
605  * create a list of filled qtds for this URB; won't link into qh.
606  */
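/*
 * For instance, an 8-byte SETUP plus an 18-byte IN data stage (a typical
 * GET_DESCRIPTOR request) becomes three qtds on the returned list:
 *
 *	[SETUP, 8 bytes, DATA0] -> [IN, 18 bytes, DATA1] -> [OUT, 0 bytes, DATA1]
 *
 * Bulk and interrupt URBs skip the SETUP and status entries and just wrap
 * the (possibly scatter-gathered) data buffer in as many qtds as needed.
 */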
607 static struct list_head *
608 qh_urb_transaction (
609 	struct ehci_hcd		*ehci,
610 	struct urb		*urb,
611 	struct list_head	*head,
612 	gfp_t			flags
613 ) {
614 	struct ehci_qtd		*qtd, *qtd_prev;
615 	dma_addr_t		buf;
616 	int			len, this_sg_len, maxpacket;
617 	int			is_input;
618 	u32			token;
619 	int			i;
620 	struct scatterlist	*sg;
621 
622 	/*
623 	 * URBs map to sequences of QTDs:  one logical transaction
624 	 */
625 	qtd = ehci_qtd_alloc (ehci, flags);
626 	if (unlikely (!qtd))
627 		return NULL;
628 	list_add_tail (&qtd->qtd_list, head);
629 	qtd->urb = urb;
630 
631 	token = QTD_STS_ACTIVE;
632 	token |= (EHCI_TUNE_CERR << 10);
633 	/* for split transactions, SplitXState initialized to zero */
634 
635 	len = urb->transfer_buffer_length;
636 	is_input = usb_pipein (urb->pipe);
637 	if (usb_pipecontrol (urb->pipe)) {
638 		/* SETUP pid */
639 		qtd_fill(ehci, qtd, urb->setup_dma,
640 				sizeof (struct usb_ctrlrequest),
641 				token | (2 /* "setup" */ << 8), 8);
642 
643 		/* ... and always at least one more pid */
644 		token ^= QTD_TOGGLE;
645 		qtd_prev = qtd;
646 		qtd = ehci_qtd_alloc (ehci, flags);
647 		if (unlikely (!qtd))
648 			goto cleanup;
649 		qtd->urb = urb;
650 		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
651 		list_add_tail (&qtd->qtd_list, head);
652 
653 		/* for zero length DATA stages, STATUS is always IN */
654 		if (len == 0)
655 			token |= (1 /* "in" */ << 8);
656 	}
657 
658 	/*
659 	 * data transfer stage:  buffer setup
660 	 */
661 	i = urb->num_mapped_sgs;
662 	if (len > 0 && i > 0) {
663 		sg = urb->sg;
664 		buf = sg_dma_address(sg);
665 
666 		/* urb->transfer_buffer_length may be smaller than the
667 		 * size of the scatterlist (or vice versa)
668 		 */
669 		this_sg_len = min_t(int, sg_dma_len(sg), len);
670 	} else {
671 		sg = NULL;
672 		buf = urb->transfer_dma;
673 		this_sg_len = len;
674 	}
675 
676 	if (is_input)
677 		token |= (1 /* "in" */ << 8);
678 	/* else it's already initted to "out" pid (0 << 8) */
679 
680 	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
681 
682 	/*
683 	 * buffer gets wrapped in one or more qtds;
684 	 * last one may be "short" (including zero len)
685 	 * and may serve as a control status ack
686 	 */
687 	for (;;) {
688 		int this_qtd_len;
689 
690 		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
691 				maxpacket);
692 		this_sg_len -= this_qtd_len;
693 		len -= this_qtd_len;
694 		buf += this_qtd_len;
695 
696 		/*
697 		 * short reads advance to a "magic" dummy instead of the next
698 		 * qtd ... that forces the queue to stop, for manual cleanup.
699 		 * (this will usually be overridden later.)
700 		 */
701 		if (is_input)
702 			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
703 
704 		/* qh makes control packets use qtd toggle; maybe switch it */
705 		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
706 			token ^= QTD_TOGGLE;
707 
708 		if (likely(this_sg_len <= 0)) {
709 			if (--i <= 0 || len <= 0)
710 				break;
711 			sg = sg_next(sg);
712 			buf = sg_dma_address(sg);
713 			this_sg_len = min_t(int, sg_dma_len(sg), len);
714 		}
715 
716 		qtd_prev = qtd;
717 		qtd = ehci_qtd_alloc (ehci, flags);
718 		if (unlikely (!qtd))
719 			goto cleanup;
720 		qtd->urb = urb;
721 		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
722 		list_add_tail (&qtd->qtd_list, head);
723 	}
724 
725 	/*
726 	 * unless the caller requires manual cleanup after short reads,
727 	 * have the alt_next mechanism keep the queue running after the
728 	 * last data qtd (the only one, for control and most other cases).
729 	 */
730 	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
731 				|| usb_pipecontrol (urb->pipe)))
732 		qtd->hw_alt_next = EHCI_LIST_END(ehci);
733 
734 	/*
735 	 * control requests may need a terminating data "status" ack;
736 	 * other OUT ones may need a terminating short packet
737 	 * (zero length).
738 	 */
739 	if (likely (urb->transfer_buffer_length != 0)) {
740 		int	one_more = 0;
741 
742 		if (usb_pipecontrol (urb->pipe)) {
743 			one_more = 1;
744 			token ^= 0x0100;	/* "in" <--> "out"  */
745 			token |= QTD_TOGGLE;	/* force DATA1 */
746 		} else if (usb_pipeout(urb->pipe)
747 				&& (urb->transfer_flags & URB_ZERO_PACKET)
748 				&& !(urb->transfer_buffer_length % maxpacket)) {
749 			one_more = 1;
750 		}
751 		if (one_more) {
752 			qtd_prev = qtd;
753 			qtd = ehci_qtd_alloc (ehci, flags);
754 			if (unlikely (!qtd))
755 				goto cleanup;
756 			qtd->urb = urb;
757 			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
758 			list_add_tail (&qtd->qtd_list, head);
759 
760 			/* never any data in such packets */
761 			qtd_fill(ehci, qtd, 0, 0, token, 0);
762 		}
763 	}
764 
765 	/* by default, enable interrupt on urb completion */
766 	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
767 		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
768 	return head;
769 
770 cleanup:
771 	qtd_list_free (ehci, urb, head);
772 	return NULL;
773 }
774 
775 /*-------------------------------------------------------------------------*/
776 
777 // Would be best to create all qh's from config descriptors,
778 // when each interface/altsetting is established.  Unlink
779 // any previous qh and cancel its urbs first; endpoints are
780 // implicitly reset then (data toggle too).
781 // That'd mean updating how usbcore talks to HCDs. (2.7?)
782 
783 
784 /*
785  * Each QH holds a qtd list; a QH is used for everything except iso.
786  *
787  * For interrupt urbs, the scheduler must set the microframe scheduling
788  * mask(s) each time the QH gets scheduled.  For highspeed, that's
789  * just one microframe in the s-mask.  For split interrupt transactions
790  * there are additional complications: c-mask, maybe FSTNs.
791  */
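/*
 * Quick reference for the hw_info1/hw_info2 fields assembled below (see the
 * EHCI spec, section 3.6, for the authoritative layout):
 *
 *	info1:	device address [6:0], endpoint number [11:8], endpoint
 *		speed [13:12], data-toggle-from-qtd bit 14, max packet
 *		size [26:16], control-endpoint flag bit 27, NAK count
 *		reload [31:28]
 *	info2:	interrupt s-mask [7:0] and c-mask [15:8] (filled in by the
 *		scheduler), TT hub address [22:16], TT port [29:23],
 *		high-bandwidth multiplier [31:30]
 */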
792 static struct ehci_qh *
793 qh_make (
794 	struct ehci_hcd		*ehci,
795 	struct urb		*urb,
796 	gfp_t			flags
797 ) {
798 	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
799 	u32			info1 = 0, info2 = 0;
800 	int			is_input, type;
801 	int			maxp = 0;
802 	struct usb_tt		*tt = urb->dev->tt;
803 	struct ehci_qh_hw	*hw;
804 
805 	if (!qh)
806 		return qh;
807 
808 	/*
809 	 * init endpoint/device data for this QH
810 	 */
811 	info1 |= usb_pipeendpoint (urb->pipe) << 8;
812 	info1 |= usb_pipedevice (urb->pipe) << 0;
813 
814 	is_input = usb_pipein (urb->pipe);
815 	type = usb_pipetype (urb->pipe);
816 	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
817 
818 	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
819 	 * acts like up to 3KB, but is built from smaller packets.
820 	 */
821 	if (max_packet(maxp) > 1024) {
822 		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
823 		goto done;
824 	}
825 
826 	/* Compute interrupt scheduling parameters just once, and save.
827 	 * - allowing for high bandwidth, how many nsec/uframe are used?
828 	 * - split transactions need a second CSPLIT uframe; same question
829 	 * - splits also need a schedule gap (for full/low speed I/O)
830 	 * - qh has a polling interval
831 	 *
832 	 * For control/bulk requests, the HC or TT handles these.
833 	 */
834 	if (type == PIPE_INTERRUPT) {
835 		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
836 				is_input, 0,
837 				hb_mult(maxp) * max_packet(maxp)));
838 		qh->start = NO_FRAME;
839 		qh->stamp = ehci->periodic_stamp;
840 
841 		if (urb->dev->speed == USB_SPEED_HIGH) {
842 			qh->c_usecs = 0;
843 			qh->gap_uf = 0;
844 
845 			qh->period = urb->interval >> 3;
846 			if (qh->period == 0 && urb->interval != 1) {
847 				/* NOTE interval 2 or 4 uframes could work.
848 				 * But interval 1 scheduling is simpler, and
849 				 * includes high bandwidth.
850 				 */
851 				urb->interval = 1;
852 			} else if (qh->period > ehci->periodic_size) {
853 				qh->period = ehci->periodic_size;
854 				urb->interval = qh->period << 3;
855 			}
856 		} else {
857 			int		think_time;
858 
859 			/* gap is f(FS/LS transfer times) */
860 			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
861 					is_input, 0, maxp) / (125 * 1000);
862 
863 			/* FIXME this just approximates SPLIT/CSPLIT times */
864 			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
865 				qh->c_usecs = qh->usecs + HS_USECS (0);
866 				qh->usecs = HS_USECS (1);
867 			} else {		// SPLIT+DATA, gap, CSPLIT
868 				qh->usecs += HS_USECS (1);
869 				qh->c_usecs = HS_USECS (0);
870 			}
871 
872 			think_time = tt ? tt->think_time : 0;
873 			qh->tt_usecs = NS_TO_US (think_time +
874 					usb_calc_bus_time (urb->dev->speed,
875 					is_input, 0, max_packet (maxp)));
876 			qh->period = urb->interval;
877 			if (qh->period > ehci->periodic_size) {
878 				qh->period = ehci->periodic_size;
879 				urb->interval = qh->period;
880 			}
881 		}
882 	}
883 
884 	/* support for tt scheduling, and access to toggles */
885 	qh->dev = urb->dev;
886 
887 	/* using TT? */
888 	switch (urb->dev->speed) {
889 	case USB_SPEED_LOW:
890 		info1 |= (1 << 12);	/* EPS "low" */
891 		/* FALL THROUGH */
892 
893 	case USB_SPEED_FULL:
894 		/* EPS 0 means "full" */
895 		if (type != PIPE_INTERRUPT)
896 			info1 |= (EHCI_TUNE_RL_TT << 28);
897 		if (type == PIPE_CONTROL) {
898 			info1 |= (1 << 27);	/* for TT */
899 			info1 |= 1 << 14;	/* toggle from qtd */
900 		}
901 		info1 |= maxp << 16;
902 
903 		info2 |= (EHCI_TUNE_MULT_TT << 30);
904 
905 		/* Some Freescale processors have an erratum in which the
906 		 * port number in the queue head is 0..N-1 instead of 1..N.
907 		 */
908 		if (ehci_has_fsl_portno_bug(ehci))
909 			info2 |= (urb->dev->ttport-1) << 23;
910 		else
911 			info2 |= urb->dev->ttport << 23;
912 
913 		/* set the address of the TT; for TDI's integrated
914 		 * root hub tt, leave it zeroed.
915 		 */
916 		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
917 			info2 |= tt->hub->devnum << 16;
918 
919 		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
920 
921 		break;
922 
923 	case USB_SPEED_HIGH:		/* no TT involved */
924 		info1 |= (2 << 12);	/* EPS "high" */
925 		if (type == PIPE_CONTROL) {
926 			info1 |= (EHCI_TUNE_RL_HS << 28);
927 			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
928 			info1 |= 1 << 14;	/* toggle from qtd */
929 			info2 |= (EHCI_TUNE_MULT_HS << 30);
930 		} else if (type == PIPE_BULK) {
931 			info1 |= (EHCI_TUNE_RL_HS << 28);
932 			/* The USB spec says that high speed bulk endpoints
933 			 * always use 512 byte maxpacket.  But some device
934 			 * vendors decided to ignore that, and MSFT is happy
935 			 * to help them do so.  So now people expect to use
936 			 * such nonconformant devices with Linux too; sigh.
937 			 */
938 			info1 |= max_packet(maxp) << 16;
939 			info2 |= (EHCI_TUNE_MULT_HS << 30);
940 		} else {		/* PIPE_INTERRUPT */
941 			info1 |= max_packet (maxp) << 16;
942 			info2 |= hb_mult (maxp) << 30;
943 		}
944 		break;
945 	default:
946 		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
947 done:
948 		qh_put (qh);
949 		return NULL;
950 	}
951 
952 	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
953 
954 	/* init as live, toggle clear, advance to dummy */
955 	qh->qh_state = QH_STATE_IDLE;
956 	hw = qh->hw;
957 	hw->hw_info1 = cpu_to_hc32(ehci, info1);
958 	hw->hw_info2 = cpu_to_hc32(ehci, info2);
959 	qh->is_out = !is_input;
960 	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
961 	qh_refresh (ehci, qh);
962 	return qh;
963 }
964 
965 /*-------------------------------------------------------------------------*/
966 
967 /* move qh (and its qtds) onto async queue; maybe enable queue.  */
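/*
 * The async schedule is a circular list headed by ehci->async; new QHs are
 * spliced in right behind that head.  Note the ordering below: the new QH's
 * own next pointers are written (and a wmb() issued) before the head is
 * patched to point at it, so the HC never follows a half-built link.
 */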
968 
969 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
970 {
971 	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
972 	struct ehci_qh	*head;
973 
974 	/* Don't link a QH if there's a Clear-TT-Buffer pending */
975 	if (unlikely(qh->clearing_tt))
976 		return;
977 
978 	WARN_ON(qh->qh_state != QH_STATE_IDLE);
979 
980 	/* (re)start the async schedule? */
981 	head = ehci->async;
982 	timer_action_done (ehci, TIMER_ASYNC_OFF);
983 	if (!head->qh_next.qh) {
984 		u32	cmd = ehci_readl(ehci, &ehci->regs->command);
985 
986 		if (!(cmd & CMD_ASE)) {
987 			/* in case a clear of CMD_ASE didn't take yet */
988 			(void)handshake(ehci, &ehci->regs->status,
989 					STS_ASS, 0, 150);
990 			cmd |= CMD_ASE;
991 			ehci_writel(ehci, cmd, &ehci->regs->command);
992 			/* posted write need not be known to HC yet ... */
993 		}
994 	}
995 
996 	/* clear halt and/or toggle; and maybe recover from silicon quirk */
997 	qh_refresh(ehci, qh);
998 
999 	/* splice right after start */
1000 	qh->qh_next = head->qh_next;
1001 	qh->hw->hw_next = head->hw->hw_next;
1002 	wmb ();
1003 
1004 	head->qh_next.qh = qh;
1005 	head->hw->hw_next = dma;
1006 
1007 	qh_get(qh);
1008 	qh->xacterrs = 0;
1009 	qh->qh_state = QH_STATE_LINKED;
1010 	/* qtd completions reported later by interrupt */
1011 }
1012 
1013 /*-------------------------------------------------------------------------*/
1014 
1015 /*
1016  * For control/bulk/interrupt, return QH with these TDs appended.
1017  * Allocates and initializes the QH if necessary.
1018  * Returns null if it can't allocate a QH it needs to.
1019  * If the QH has TDs (urbs) already, that's great.
1020  */
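/*
 * New qtds are queued by swapping with the QH's dummy qtd: the first new
 * qtd's contents are copied into the old dummy (already linked at the end
 * of the hardware chain), the caller's first qtd is recycled as the new
 * dummy, and only the final write of hw_token makes the copied qtd visible
 * as active.  That way the HC never sees a partially written qtd.
 */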
1021 static struct ehci_qh *qh_append_tds (
1022 	struct ehci_hcd		*ehci,
1023 	struct urb		*urb,
1024 	struct list_head	*qtd_list,
1025 	int			epnum,
1026 	void			**ptr
1027 )
1028 {
1029 	struct ehci_qh		*qh = NULL;
1030 	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
1031 
1032 	qh = (struct ehci_qh *) *ptr;
1033 	if (unlikely (qh == NULL)) {
1034 		/* can't sleep here, we have ehci->lock... */
1035 		qh = qh_make (ehci, urb, GFP_ATOMIC);
1036 		*ptr = qh;
1037 	}
1038 	if (likely (qh != NULL)) {
1039 		struct ehci_qtd	*qtd;
1040 
1041 		if (unlikely (list_empty (qtd_list)))
1042 			qtd = NULL;
1043 		else
1044 			qtd = list_entry (qtd_list->next, struct ehci_qtd,
1045 					qtd_list);
1046 
1047 		/* control qh may need patching ... */
1048 		if (unlikely (epnum == 0)) {
1049 
1050 			/* usb_reset_device() briefly reverts to address 0 */
1051 			if (usb_pipedevice (urb->pipe) == 0)
1052 				qh->hw->hw_info1 &= ~qh_addr_mask;
1053 		}
1054 
1055 		/* just one way to queue requests: swap with the dummy qtd.
1056 		 * only hc or qh_refresh() ever modify the overlay.
1057 		 */
1058 		if (likely (qtd != NULL)) {
1059 			struct ehci_qtd		*dummy;
1060 			dma_addr_t		dma;
1061 			__hc32			token;
1062 
1063 			/* to avoid racing the HC, use the dummy td instead of
1064 			 * the first td of our list (becomes new dummy).  both
1065 			 * tds stay deactivated until we're done, when the
1066 			 * HC is allowed to fetch the old dummy (4.10.2).
1067 			 */
1068 			token = qtd->hw_token;
1069 			qtd->hw_token = HALT_BIT(ehci);
1070 
1071 			dummy = qh->dummy;
1072 
1073 			dma = dummy->qtd_dma;
1074 			*dummy = *qtd;
1075 			dummy->qtd_dma = dma;
1076 
1077 			list_del (&qtd->qtd_list);
1078 			list_add (&dummy->qtd_list, qtd_list);
1079 			list_splice_tail(qtd_list, &qh->qtd_list);
1080 
1081 			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1082 			qh->dummy = qtd;
1083 
1084 			/* hc must see the new dummy at list end */
1085 			dma = qtd->qtd_dma;
1086 			qtd = list_entry (qh->qtd_list.prev,
1087 					struct ehci_qtd, qtd_list);
1088 			qtd->hw_next = QTD_NEXT(ehci, dma);
1089 
1090 			/* let the hc process these next qtds */
1091 			wmb ();
1092 			dummy->hw_token = token;
1093 
1094 			urb->hcpriv = qh_get (qh);
1095 		}
1096 	}
1097 	return qh;
1098 }
1099 
1100 /*-------------------------------------------------------------------------*/
1101 
1102 static int
1103 submit_async (
1104 	struct ehci_hcd		*ehci,
1105 	struct urb		*urb,
1106 	struct list_head	*qtd_list,
1107 	gfp_t			mem_flags
1108 ) {
1109 	int			epnum;
1110 	unsigned long		flags;
1111 	struct ehci_qh		*qh = NULL;
1112 	int			rc;
1113 
1114 	epnum = urb->ep->desc.bEndpointAddress;
1115 
1116 #ifdef EHCI_URB_TRACE
1117 	{
1118 		struct ehci_qtd *qtd;
1119 		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
1120 		ehci_dbg(ehci,
1121 			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1122 			 __func__, urb->dev->devpath, urb,
1123 			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
1124 			 urb->transfer_buffer_length,
1125 			 qtd, urb->ep->hcpriv);
1126 	}
1127 #endif
1128 
1129 	spin_lock_irqsave (&ehci->lock, flags);
1130 	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1131 		rc = -ESHUTDOWN;
1132 		goto done;
1133 	}
1134 	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1135 	if (unlikely(rc))
1136 		goto done;
1137 
1138 	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
1139 	if (unlikely(qh == NULL)) {
1140 		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1141 		rc = -ENOMEM;
1142 		goto done;
1143 	}
1144 
1145 	/* Control/bulk operations through TTs don't need scheduling;
1146 	 * the HC and TT handle it when the TT has a buffer ready.
1147 	 */
1148 	if (likely (qh->qh_state == QH_STATE_IDLE))
1149 		qh_link_async(ehci, qh);
1150  done:
1151 	spin_unlock_irqrestore (&ehci->lock, flags);
1152 	if (unlikely (qh == NULL))
1153 		qtd_list_free (ehci, urb, qtd_list);
1154 	return rc;
1155 }
1156 
1157 /*-------------------------------------------------------------------------*/
1158 
1159 /* the async qh for the qtds being reclaimed is now unlinked from the HC */
1160 
1161 static void end_unlink_async (struct ehci_hcd *ehci)
1162 {
1163 	struct ehci_qh		*qh = ehci->reclaim;
1164 	struct ehci_qh		*next;
1165 
1166 	iaa_watchdog_done(ehci);
1167 
1168 	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
1169 	qh->qh_state = QH_STATE_IDLE;
1170 	qh->qh_next.qh = NULL;
1171 	qh_put (qh);			// refcount from reclaim
1172 
1173 	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
1174 	next = qh->reclaim;
1175 	ehci->reclaim = next;
1176 	qh->reclaim = NULL;
1177 
1178 	qh_completions (ehci, qh);
1179 
1180 	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
1181 		qh_link_async (ehci, qh);
1182 	} else {
1183 		/* it's not free to turn the async schedule on/off; leave it
1184 		 * active but idle for a while once it empties.
1185 		 */
1186 		if (ehci->rh_state == EHCI_RH_RUNNING
1187 				&& ehci->async->qh_next.qh == NULL)
1188 			timer_action (ehci, TIMER_ASYNC_OFF);
1189 	}
1190 	qh_put(qh);			/* refcount from async list */
1191 
1192 	if (next) {
1193 		ehci->reclaim = NULL;
1194 		start_unlink_async (ehci, next);
1195 	}
1196 
1197 	if (ehci->has_synopsys_hc_bug)
1198 		ehci_writel(ehci, (u32) ehci->async->qh_dma,
1199 			    &ehci->regs->async_next);
1200 }
1201 
1202 /* makes sure the async qh will become idle */
1203 /* caller must own ehci->lock */
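/*
 * Unlink sequence: the QH is first removed from the software and hardware
 * async lists here, then CMD_IAAD rings the Interrupt-on-Async-Advance
 * doorbell.  Once the controller acknowledges (or the IAA watchdog fires),
 * end_unlink_async() knows the HC no longer caches the QH and can safely
 * recycle or relink it.
 */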
1204 
1205 static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1206 {
1207 	int		cmd = ehci_readl(ehci, &ehci->regs->command);
1208 	struct ehci_qh	*prev;
1209 
1210 #ifdef DEBUG
1211 	assert_spin_locked(&ehci->lock);
1212 	if (ehci->reclaim
1213 			|| (qh->qh_state != QH_STATE_LINKED
1214 				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
1215 			)
1216 		BUG ();
1217 #endif
1218 
1219 	/* stop async schedule right now? */
1220 	if (unlikely (qh == ehci->async)) {
1221 		/* can't get here without STS_ASS set */
1222 		if (ehci->rh_state != EHCI_RH_HALTED
1223 				&& !ehci->reclaim) {
1224 			/* ... and CMD_IAAD clear */
1225 			ehci_writel(ehci, cmd & ~CMD_ASE,
1226 				    &ehci->regs->command);
1227 			wmb ();
1228 			// handshake later, if we need to
1229 			timer_action_done (ehci, TIMER_ASYNC_OFF);
1230 		}
1231 		return;
1232 	}
1233 
1234 	qh->qh_state = QH_STATE_UNLINK;
1235 	ehci->reclaim = qh = qh_get (qh);
1236 
1237 	prev = ehci->async;
1238 	while (prev->qh_next.qh != qh)
1239 		prev = prev->qh_next.qh;
1240 
1241 	prev->hw->hw_next = qh->hw->hw_next;
1242 	prev->qh_next = qh->qh_next;
1243 	if (ehci->qh_scan_next == qh)
1244 		ehci->qh_scan_next = qh->qh_next.qh;
1245 	wmb ();
1246 
1247 	/* If the controller isn't running, we don't have to wait for it */
1248 	if (unlikely(ehci->rh_state != EHCI_RH_RUNNING)) {
1249 		/* if (unlikely (qh->reclaim != 0))
1250 		 *	this will recurse, probably not much
1251 		 */
1252 		end_unlink_async (ehci);
1253 		return;
1254 	}
1255 
1256 	cmd |= CMD_IAAD;
1257 	ehci_writel(ehci, cmd, &ehci->regs->command);
1258 	(void)ehci_readl(ehci, &ehci->regs->command);
1259 	iaa_watchdog_start(ehci);
1260 }
1261 
1262 /*-------------------------------------------------------------------------*/
1263 
1264 static void scan_async (struct ehci_hcd *ehci)
1265 {
1266 	bool			stopped;
1267 	struct ehci_qh		*qh;
1268 	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;
1269 
1270 	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
1271 	stopped = (ehci->rh_state != EHCI_RH_RUNNING);
1272 
1273 	ehci->qh_scan_next = ehci->async->qh_next.qh;
1274 	while (ehci->qh_scan_next) {
1275 		qh = ehci->qh_scan_next;
1276 		ehci->qh_scan_next = qh->qh_next.qh;
1277  rescan:
1278 		/* clean any finished work for this qh */
1279 		if (!list_empty(&qh->qtd_list)) {
1280 			int temp;
1281 
1282 			/*
1283 			 * Unlinks could happen here; completion reporting
1284 			 * drops the lock.  That's why ehci->qh_scan_next
1285 			 * always holds the next qh to scan; if the next qh
1286 			 * gets unlinked then ehci->qh_scan_next is adjusted
1287 			 * in start_unlink_async().
1288 			 */
1289 			qh = qh_get(qh);
1290 			temp = qh_completions(ehci, qh);
1291 			if (qh->needs_rescan)
1292 				unlink_async(ehci, qh);
1293 			qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
1294 			qh_put(qh);
1295 			if (temp != 0)
1296 				goto rescan;
1297 		}
1298 
1299 		/* unlink idle entries, reducing DMA usage as well
1300 		 * as HCD schedule-scanning costs.  delay for any qh
1301 		 * we just scanned; it's not unusual for it to become
1302 		 * active again before long.
1303 		 * (plus, the delay avoids some kind of re-activation race.)
1304 		 */
1305 		if (list_empty(&qh->qtd_list)
1306 				&& qh->qh_state == QH_STATE_LINKED) {
1307 			if (!ehci->reclaim && (stopped ||
1308 					time_after_eq(jiffies, qh->unlink_time)))
1309 				start_unlink_async(ehci, qh);
1310 			else
1311 				action = TIMER_ASYNC_SHRINK;
1312 		}
1313 	}
1314 	if (action == TIMER_ASYNC_SHRINK)
1315 		timer_action (ehci, TIMER_ASYNC_SHRINK);
1316 }
1317