/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
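
/*
 * Illustrative sketch only (not called by the driver): how a TD's three
 * hardware words are typically composed.  uhci_maxerr() and the TD_CTRL_*
 * flags build the status word, the PID plus uhci_explen() build the token,
 * and the buffer is simply a DMA address.  The values below (an 8-byte OUT
 * packet, 3 allowed errors) are hypothetical.
 */
static inline void example_fill_out_td(struct uhci_td *td, dma_addr_t buf)
{
	u32 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;	/* 3 retries, active */
	u32 token = USB_PID_OUT | uhci_explen(8);	/* 8-byte OUT packet */

	uhci_fill_td(td, status, token, buf);
}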

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}
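
/*
 * Note on ordering (an observation on the code above, not new behavior):
 * in both the insert and remove paths, a TD's own link pointer is written
 * before the pointer that makes it reachable (ltd->link or uhci->frame[]),
 * with wmb() in between, so the controller can never follow a
 * half-initialized chain.
 */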

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
	wmb();
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->dummy_td = uhci_alloc_td(uhci);
		if (!qh->dummy_td) {
			dma_pool_free(uhci->qh_pool, qh, dma_handle);
			return NULL;
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
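
/*
 * A note on the dummy TD (summarizing the submit routines below, not new
 * behavior): each endpoint QH keeps one inactive TD at the tail of its
 * chain.  When an URB is submitted, the dummy is filled in as the URB's
 * first TD but left inactive, the remaining TDs are linked in after it,
 * a fresh inactive dummy is appended, and only then is the old dummy's
 * ACTIVE bit set.  The controller therefore never sees a partially
 * built chain.
 */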

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 */
static void uhci_cleanup_queue(struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue(). */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		return;

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		return;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		return;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		return;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			/* Advance past this URB: the next one starts with
			 * the toggle following this URB's *last* TD */
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
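
/*
 * A minimal sketch of the invariant maintained above (for exposition only;
 * the driver does not call this).  Data toggles alternate per packet, so a
 * URB of n packets whose first TD carries toggle t must be followed by a
 * URB starting with toggle t ^ (n & 1).
 */
static inline unsigned int example_toggle_after_urb(unsigned int first_toggle,
		unsigned int packets)
{
	return first_toggle ^ (packets & 1);
}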

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}
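
/*
 * Hedged usage sketch (mirrors what uhci_result_common() does below, with
 * a hypothetical helper name): extract the status bits from a completed
 * TD and map them onto a standard errno value.
 */
static inline int example_td_errno(struct uhci_td *td)
{
	unsigned int ctrlstat = td_status(td);

	return uhci_map_status(uhci_status_bits(ctrlstat),
			uhci_packetout(td_token(td)));
}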

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
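
/*
 * Shape of the chain built above (descriptive only): starting from the old
 * dummy TD, a control URB becomes
 *
 *	SETUP (8 bytes, DATA0) -> DATA TDs (DATA1, DATA0, ...)
 *		-> STATUS (opposite direction, DATA1, IOC) -> new dummy
 *
 * The status stage is an IN packet for OUT or no-data requests and an OUT
 * packet otherwise, as selected by the code just before "End in Data1".
 */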

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet if the direction
	 * is OUT and the transfer length was an exact multiple of maxsze;
	 * hence (len = transfer_length - N * maxsze) == 0.  However, if
	 * transfer_length == 0, the zero packet was already prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
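
/*
 * Sketch of the packetization the loop above performs (illustration only,
 * not used by the driver): a transfer of len bytes with maximum packet
 * size maxsze needs (len + maxsze - 1) / maxsze TDs, plus one more for
 * URB_ZERO_PACKET on an OUT whose length is an exact multiple of maxsze,
 * and at least one TD even when len == 0.
 */
static inline int example_td_count(int len, int maxsze, int zero_packet)
{
	int n = (len + maxsze - 1) / maxsze;

	if (n == 0)			/* Zero-length URB still takes a TD */
		n = 1;
	if (zero_packet && len > 0 && len % maxsze == 0)
		n++;			/* Trailing 0-length packet */
	return n;
}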

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_inc_fsbr(uhci, urb);
	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
	return uhci_submit_common(uhci, urb, qh);
}
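
/*
 * Assumption about __interval_to_skel() (defined outside this file,
 * presumably in uhci-hcd.h): the requested interval is effectively rounded
 * down to a power of two when choosing a skeleton QH, so e.g. an interval
 * of 10 ms would land on the 8 ms interrupt queue.
 */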

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = cpu_to_le32(td->dma_handle);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(uhci_dev(uhci),
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;
			else if (ctrlstat & TD_CTRL_SPD)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}
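
/*
 * Return convention used above (restating the code): -EINPROGRESS means
 * the URB is still running, a negative errno reports a fatal error, 0
 * means complete, and the internal "ret == 1" short-packet marker is
 * converted by uhci_fixup_short_transfer() into one of the other
 * outcomes.
 */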

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Figure out the starting frame number */
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&qh->queue)) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10);

		} else {		/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			urb->start_frame = (last_urb->start_frame +
					last_urb->number_of_packets *
					last_urb->interval);
		}
	} else {
		/* FIXME: Sanity check */
	}
	urb->start_frame &= (UHCI_NUMFRAMES - 1);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}
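
/*
 * Illustrative sketch of the URB_ISO_ASAP scheduling above (hypothetical
 * helper, not called by the driver): frame numbers wrap at UHCI_NUMFRAMES,
 * and a new URB is placed either a safety margin of 10 frames ahead of the
 * current frame or immediately after the last queued URB.
 */
static inline int example_next_iso_frame(int last_start, int packets,
		int interval)
{
	return (last_start + packets * interval) & (UHCI_NUMFRAMES - 1);
}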

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped)
		uhci_activate_qh(uhci, qh);
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (urbp->qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);
	uhci_unlink_qh(uhci, urbp->qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
	uhci_free_urb_priv(uhci, urbp);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
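
/*
 * How the macro works (restating the logic): a QH is safely off the
 * hardware schedule once at least one full frame has elapsed since it was
 * unlinked, i.e. once the current frame number no longer equals
 * unlink_frame.  When the controller is stopped no frames advance, but
 * nothing is running either, so adding the nonzero uhci->is_stopped value
 * forces the inequality to hold and the QH is considered unlinked at once.
 */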

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = ECONNRESET;		/* Not -ECONNRESET */
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == ECONNRESET) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {
			uhci_cleanup_queue(qh, urb);
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);
		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
 rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);
			uhci_scan_qh(uhci, qh, regs);
		}
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
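
/*
 * Concurrency note (restating the flags above): a caller that finds
 * scan_in_progress set just records need_rescan and returns; the thread
 * already inside the scan loops back via "rescan" until no new request
 * has arrived, so no events are lost and only one CPU walks the schedule
 * at a time.
 */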

static void check_fsbr(struct uhci_hcd *uhci)
{
	/* For now, don't scan URBs for FSBR timeouts.
	 * Add it back in later... */

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}
1303