xref: /openbmc/linux/drivers/usb/host/uhci-q.c (revision 22246614)
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}
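
/*
 * For illustration only, the FSBR life cycle as the rest of this file
 * drives it (this is just the call pattern, not extra code):
 *
 *	uhci_add_fsbr(uhci, urb);	    // URB opts in (no URB_NO_FSBR)
 *	uhci_urbp_wants_fsbr(uhci, urbp);   // turns FSBR on, or cancels a
 *					    // pending expiration
 *	...
 *	// Once the scanning code finds that no active queue wants FSBR,
 *	// it sets uhci->fsbr_expiring and arms uhci->fsbr_timer; when the
 *	// timer fires, uhci_fsbr_timeout() turns FSBR off.
 */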


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list)) {
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
		WARN_ON(1);
	}
	if (!list_empty(&td->fl_list)) {
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
		WARN_ON(1);
	}

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}
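
/*
 * For example, if frame 3 already holds TDs A and B (A first), inserting
 * TD C above yields the hardware chain A -> B -> C -> (whatever B used
 * to point at), with uhci->frame[3] still pointing at A and C sitting at
 * the tail of A's fl_list.  The wmb() guarantees C's own link pointer is
 * visible before the controller can follow a pointer leading to C.
 */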

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue)) {
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
		WARN_ON(1);
	}

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
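
/*
 * A worked example of the toggle fixup: DATA0/DATA1 is a single bit in
 * the TD token, so flipping a TD's toggle is one XOR with
 * TD_TOKEN_TOGGLE.  If an URB's first TD already carries the expected
 * toggle, the whole URB is left alone and the running value is simply
 * resynchronized from its last TD; otherwise every TD in the URB gets
 * flipped.  A running value of 2 can never match the 0/1 value read
 * from a TD token; it means the hardware is still following this queue,
 * so the first URB's toggles are taken as correct by definition.
 */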

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}
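
/*
 * With FSBR on, the async end of the schedule forms a loop, roughly:
 *
 *	... -> first FSBR QH -> ... -> last async QH -> skel_term_qh
 *	              ^                                      |
 *	              +--------------------------------------+
 *
 * so for the rest of each frame the controller keeps cycling through
 * the full-speed control and bulk QHs instead of going idle.
 */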

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}
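
/*
 * Example: a full-speed frame lasts 1 ms, and the USB spec leaves at
 * least 10% of it for non-periodic transfers, hence the 900 us limit
 * above.  For a QH with period 8 the search tries phases 0..7; the cost
 * of each candidate phase is the highest existing load among frames
 * phase, phase+8, phase+16, ..., and the phase whose worst frame is
 * lightest wins -- a minimax choice.
 */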

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node)) {
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);
		WARN_ON(1);
	}

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}
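
/*
 * For example, a CRC/timeout error on an input TD usually means the
 * device's response was corrupted or absent (-EILSEQ), while the same
 * bits on an output TD mean the device never handshaked our packet
 * (-EPROTO); a stalled endpoint always maps to -EPIPE.
 */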

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means we have to add a zero-length packet if
	 * the direction is OUT and transfer_buffer_length was a nonzero
	 * exact multiple of maxsze, in which case the loop above ended
	 * with (len = transfer_buffer_length - N * maxsze) == 0.  If
	 * transfer_buffer_length == 0, the zero-length packet was
	 * already prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
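	/* (Roughly: 4096 bytes in 64-byte packets is 64 TDs, and full
	 * speed bulk moves at most 19 such packets per 1 ms frame, so
	 * the transfer spans a little over 3 frames.) */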
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;
		qh->period = 1 << exponent;
		qh->skel = SKEL_INDEX(exponent);

		/* For now, interrupt phase is fixed by the layout
		 * of the QH lists. */
		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
		ret = uhci_check_bandwidth(uhci, qh);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}
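
/*
 * For example, an URB with interval 10 gets exponent 3: 1 << 3 == 8 is
 * the largest power of two not exceeding 10.  The endpoint is then
 * polled every 8 frames, and on success urb->interval is rounded down
 * to the period actually used, 8.
 */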

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(td);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}
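
/*
 * For example, if a 64-byte control read is answered with only 13
 * bytes, the remaining data TDs are useless but the status stage must
 * still run, so the queue is restarted at the last TD and the skipped
 * data TDs are freed.  A short bulk or interrupt transfer instead
 * restarts at the next URB, once the toggles of the URBs behind it
 * have been fixed up.
 */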

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		if (urb->transfer_flags & URB_ISO_ASAP) {
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
			uhci_get_current_frame_number(uhci);
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * correct phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
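			/* (E.g. with frame == 105, phase == 3, and
			 * period == 8 this gives 105 + ((3 - 105) & 7)
			 * == 107, and 107 & 7 == 3 as required; the
			 * mask trick works because the period is a
			 * power of two.) */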
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
		}

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {
		/* Find the next unused frame */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP) {
			/* Skip some frames if necessary to ensure
			 * the start frame is in the future.
			 */
			uhci_get_current_frame_number(uhci);
			if (uhci_frame_before_eq(frame, uhci->frame_number)) {
				frame = uhci->frame_number + 1;
				frame += ((qh->phase - frame) &
					(qh->period - 1));
			}
		}	/* Otherwise pick up where the last URB leaves off */
		urb->start_frame = frame;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* urb->actual_length < 0 means the setup transaction didn't
		 * complete successfully.  Either it failed or the URB was
		 * unlinked first.  Regardless, don't confuse people with a
		 * negative length. */
		urb->actual_length = max(urb->actual_length, 0);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
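
/*
 * That is, a QH has finished unlinking once at least one frame boundary
 * has passed since qh->unlink_frame was recorded, so the controller can
 * no longer be following it.  While the controller is stopped no frames
 * elapse at all, which is why the nonzero is_stopped value is added in:
 * it forces the comparison to report the unlink as complete.
 */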

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = 0;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
1769