// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
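/*
 * (Roughly speaking, and inferred from uhci_fsbr_on()/uhci_fsbr_off() below:
 * while FSBR is on, the last async QH links to the terminating skeleton QH,
 * which in turn points back at the first FSBR QH, so the controller keeps
 * looping through the async schedule for the rest of each frame instead of
 * going idle; that loop is what reclaims the otherwise unused bandwidth.)
 */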
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM(uhci);
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(struct timer_list *t)
{
	struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}
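
/*
 * (The fsbr_expiring flag and fsbr_timer are presumably armed elsewhere,
 * once every queue that wanted FSBR has stopped advancing;
 * uhci_urbp_wants_fsbr() above cancels that pending expiration if a queue
 * becomes active again, and this timeout handler is what finally turns
 * FSBR off.)
 */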


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
				u32 status, u32 token, u32 buffer)
{
	td->status = cpu_to_hc32(uhci, status);
	td->token = cpu_to_hc32(uhci, token);
	td->buffer = cpu_to_hc32(uhci, buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
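/*
 * (For reference: the hardware frame list has UHCI_NUMFRAMES (1024) entries,
 * one per millisecond frame, and the controller wraps around it continuously.
 * That is why frame numbers below are masked with (UHCI_NUMFRAMES - 1)
 * rather than range-checked.)
 */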
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(uhci, td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(uhci, td);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next,
					 struct uhci_td,
					 fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM(uhci);
	qh->link = UHCI_PTR_TERM(uhci);

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = usb_endpoint_type(&hep->desc);
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					usb_endpoint_maxp(&hep->desc))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM(uhci))
		goto done;
	qh->element = UHCI_PTR_TERM(uhci);

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(uhci, td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
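/*
 * (Concretely: data toggles must alternate DATA0/DATA1 across an endpoint's
 * whole queue.  If, say, an URB ends early on a TD whose toggle was DATA1,
 * every TD of the URBs still queued behind it has to be flipped so that the
 * next transaction goes out as DATA0; that is what the loop below does, and
 * the final toggle value is written back with usb_settoggle().)
 */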
static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
			int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(uhci, td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= cpu_to_hc32(uhci,
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(uhci, qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__hc32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(uhci, qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(uhci, td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__hc32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
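/*
 * (For example, with MAX_PHASE frames per scheduling window, a QH that has
 * period 8 and phase 3 contributes its load to frames 3, 11, 19, ... of the
 * window; uhci_highest_load() reports the worst-case total among just those
 * frames, which is what the admission check below compares against.)
 */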
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
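	/* (Each frame is 1 ms = 1000 us of bus time; reserving at most 900 us
	 * for periodic transfers keeps the remaining ~10% of every frame free
	 * for control/bulk traffic, hence the "+ qh->load > 900" test below.) */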
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(uhci, td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__hc32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(uhci, td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(pktsze), data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

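	/* (Background, inferred from the code around here: each non-iso QH
	 * keeps a permanently inactive "dummy" TD at the tail of its queue.
	 * New URBs are built starting in the old dummy, a fresh dummy is
	 * appended, and only then is the old dummy marked ACTIVE, so the
	 * controller never sees a partially built queue and the QH element
	 * pointer never has to be rewritten while the queue is running.) */
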
	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
	int len = urb->transfer_buffer_length;
	int this_sg_len;
	dma_addr_t data;
	__hc32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;
	struct scatterlist *sg;
	int i;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		data = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		data = urb->transfer_dma;
		this_sg_len = len;
	}
	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	for (;;) {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(uhci, td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		toggle ^= 1;
		data += pktsze;
		this_sg_len -= pktsze;
		len -= maxsze;
		if (this_sg_len <= 0) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			data = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}
	}

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(uhci, td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
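	/* (Rough arithmetic behind that figure: a full-speed frame is 1 ms and
	 * can carry on the order of nineteen 64-byte bulk packets, i.e. a bit
	 * over 1200 bytes of payload per frame when the bus is otherwise idle,
	 * so 4096 bytes works out to roughly 3 to 4 frames.) */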
	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
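		/* (For instance, an URB asking for interval 10 ends up with
		 * exponent 3, i.e. a period of 8 frames: the interval is
		 * always rounded down to a power of two, since that is how
		 * the skeleton interrupt queues are laid out.) */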
1091caf3827aSAlan Stern if (exponent < 0)
1092caf3827aSAlan Stern return -EINVAL;
1093e58dcebcSAlan Stern
1094e58dcebcSAlan Stern /* If the slot is full, try a lower period */
1095e58dcebcSAlan Stern do {
10963ca2a321SAlan Stern qh->period = 1 << exponent;
109717230acdSAlan Stern qh->skel = SKEL_INDEX(exponent);
1098caf3827aSAlan Stern
10993ca2a321SAlan Stern /* For now, interrupt phase is fixed by the layout
1100e58dcebcSAlan Stern * of the QH lists.
1101e58dcebcSAlan Stern */
11023ca2a321SAlan Stern qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
11033ca2a321SAlan Stern ret = uhci_check_bandwidth(uhci, qh);
1104e58dcebcSAlan Stern } while (ret != 0 && --exponent >= 0);
11053ca2a321SAlan Stern if (ret)
11063ca2a321SAlan Stern return ret;
11073ca2a321SAlan Stern } else if (qh->period > urb->interval)
11083ca2a321SAlan Stern return -EINVAL; /* Can't decrease the period */
11093ca2a321SAlan Stern
11103ca2a321SAlan Stern ret = uhci_submit_common(uhci, urb, qh);
11113ca2a321SAlan Stern if (ret == 0) {
11123ca2a321SAlan Stern urb->interval = qh->period;
11133ca2a321SAlan Stern if (!qh->bandwidth_reserved)
11143ca2a321SAlan Stern uhci_reserve_bandwidth(uhci, qh);
11153ca2a321SAlan Stern }
11163ca2a321SAlan Stern return ret;
11171da177e4SLinus Torvalds }
11181da177e4SLinus Torvalds
11191da177e4SLinus Torvalds /*
1120b1869000SAlan Stern * Fix up the data structures following a short transfer
1121b1869000SAlan Stern */
1122b1869000SAlan Stern static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
112359e29ed9SAlan Stern struct uhci_qh *qh, struct urb_priv *urbp)
1124b1869000SAlan Stern {
1125b1869000SAlan Stern struct uhci_td *td;
112659e29ed9SAlan Stern struct list_head *tmp;
112759e29ed9SAlan Stern int ret;
1128b1869000SAlan Stern
1129b1869000SAlan Stern td = list_entry(urbp->td_list.prev, struct uhci_td, list);
1130b1869000SAlan Stern if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1131b1869000SAlan Stern
1132b1869000SAlan Stern /* When a control transfer is short, we have to restart
1133b1869000SAlan Stern * the queue at the status stage transaction, which is
1134b1869000SAlan Stern * the last TD. */
113559e29ed9SAlan Stern WARN_ON(list_empty(&urbp->td_list));
113651e2f62fSJan Andersson qh->element = LINK_TO_TD(uhci, td);
113759e29ed9SAlan Stern tmp = td->list.prev;
1138b1869000SAlan Stern ret = -EINPROGRESS;
1139b1869000SAlan Stern
114059e29ed9SAlan Stern } else {
1141b1869000SAlan Stern
1142b1869000SAlan Stern /* When a bulk/interrupt transfer is short, we have to
1143b1869000SAlan Stern * fix up the toggles of the following URBs on the queue
1144b1869000SAlan Stern * before restarting the queue at the next URB. */
114551e2f62fSJan Andersson qh->initial_toggle =
114651e2f62fSJan Andersson uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
114751e2f62fSJan Andersson uhci_fixup_toggles(uhci, qh, 1);
1148b1869000SAlan Stern
114959e29ed9SAlan Stern if (list_empty(&urbp->td_list))
115059e29ed9SAlan Stern td = qh->post_td;
1151b1869000SAlan Stern qh->element = td->link;
115259e29ed9SAlan Stern tmp = urbp->td_list.prev;
115359e29ed9SAlan Stern ret = 0;
1154b1869000SAlan Stern }
1155b1869000SAlan Stern
115659e29ed9SAlan Stern /* Remove all the TDs we skipped over, from tmp back to the start */
115759e29ed9SAlan Stern while (tmp != &urbp->td_list) {
115859e29ed9SAlan Stern td = list_entry(tmp, struct uhci_td, list);
115959e29ed9SAlan Stern tmp = tmp->prev;
116059e29ed9SAlan Stern
116104538a25SAlan Stern uhci_remove_td_from_urbp(td);
116204538a25SAlan Stern uhci_free_td(uhci, td);
116359e29ed9SAlan Stern }
1164b1869000SAlan Stern return ret;
1165b1869000SAlan Stern }
1166b1869000SAlan Stern
1167b1869000SAlan Stern /*
1168b1869000SAlan Stern * Common result for control, bulk, and interrupt
1169b1869000SAlan Stern */
1170b1869000SAlan Stern static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1171b1869000SAlan Stern {
1172b1869000SAlan Stern struct urb_priv *urbp = urb->hcpriv;
1173b1869000SAlan Stern struct uhci_qh *qh = urbp->qh;
117459e29ed9SAlan Stern struct uhci_td *td, *tmp;
1175b1869000SAlan Stern unsigned status;
1176b1869000SAlan Stern int ret = 0;
1177b1869000SAlan Stern
117859e29ed9SAlan Stern list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
1179b1869000SAlan Stern unsigned int ctrlstat;
1180b1869000SAlan Stern int len;
1181b1869000SAlan Stern
118251e2f62fSJan Andersson ctrlstat = td_status(uhci, td);
1183b1869000SAlan Stern status = uhci_status_bits(ctrlstat);
1184b1869000SAlan Stern if (status & TD_CTRL_ACTIVE)
1185b1869000SAlan Stern return -EINPROGRESS;
1186b1869000SAlan Stern
1187b1869000SAlan Stern len = uhci_actual_length(ctrlstat);
1188b1869000SAlan Stern urb->actual_length += len;
1189b1869000SAlan Stern
1190b1869000SAlan Stern if (status) {
1191b1869000SAlan Stern ret = uhci_map_status(status,
119251e2f62fSJan Andersson uhci_packetout(td_token(uhci, td)));
1193b1869000SAlan Stern if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1194b1869000SAlan Stern /* Some debugging code */
1195be3cbc5fSDavid Brownell dev_dbg(&urb->dev->dev,
1196b1869000SAlan Stern "%s: failed with status %x\n",
1197441b62c1SHarvey Harrison __func__, status);
1198b1869000SAlan Stern
1199b1869000SAlan Stern if (debug > 1 && errbuf) {
1200b1869000SAlan Stern /* Print the chain for debugging */
1201e009f1b2SAlan Stern uhci_show_qh(uhci, urbp->qh, errbuf,
120213996ca7SChen Gang ERRBUF_LEN - EXTRA_SPACE, 0);
1203b1869000SAlan Stern lprintk(errbuf);
1204b1869000SAlan Stern }
1205b1869000SAlan Stern }
1206b1869000SAlan Stern
1207e7e7c360SAlan Stern /* Did we receive a short packet? */
120851e2f62fSJan Andersson } else if (len < uhci_expected_length(td_token(uhci, td))) {
1209b1869000SAlan Stern
1210e7e7c360SAlan Stern /* For control transfers, go to the status TD if
1211e7e7c360SAlan Stern * this isn't already the last data TD */
1212e7e7c360SAlan Stern if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1213e7e7c360SAlan Stern if (td->list.next != urbp->td_list.prev)
1214e7e7c360SAlan Stern ret = 1;
1215e7e7c360SAlan Stern }
1216e7e7c360SAlan Stern
1217e7e7c360SAlan Stern /* For bulk and interrupt, this may be an error */
1218e7e7c360SAlan Stern else if (urb->transfer_flags & URB_SHORT_NOT_OK)
1219b1869000SAlan Stern ret = -EREMOTEIO;
1220f443ddf1SAlan Stern
1221f443ddf1SAlan Stern /* Fixup needed only if this isn't the URB's last TD */
1222f443ddf1SAlan Stern else if (&td->list != urbp->td_list.prev)
1223b1869000SAlan Stern ret = 1;
1224b1869000SAlan Stern }
1225b1869000SAlan Stern
122604538a25SAlan Stern uhci_remove_td_from_urbp(td);
122759e29ed9SAlan Stern if (qh->post_td)
122804538a25SAlan Stern uhci_free_td(uhci, qh->post_td);
122959e29ed9SAlan Stern qh->post_td = td;
123059e29ed9SAlan Stern
1231b1869000SAlan Stern if (ret != 0)
1232b1869000SAlan Stern goto err;
1233b1869000SAlan Stern }
1234b1869000SAlan Stern return ret;
1235b1869000SAlan Stern
1236b1869000SAlan Stern err:
1237b1869000SAlan Stern if (ret < 0) {
1238b1869000SAlan Stern /* Note that the queue has stopped and save
1239b1869000SAlan Stern * the next toggle value */
124051e2f62fSJan Andersson qh->element = UHCI_PTR_TERM(uhci);
1241b1869000SAlan Stern qh->is_stopped = 1;
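/* Control endpoints don't carry toggle state from one URB to the
 * next, so only bulk and interrupt queues need a toggle fixup. */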
1242b1869000SAlan Stern qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
124351e2f62fSJan Andersson qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
1244b1869000SAlan Stern (ret == -EREMOTEIO);
1245b1869000SAlan Stern
1246b1869000SAlan Stern } else /* Short packet received */
124759e29ed9SAlan Stern ret = uhci_fixup_short_transfer(uhci, qh, urbp);
1248b1869000SAlan Stern return ret;
1249b1869000SAlan Stern }
1250b1869000SAlan Stern
1251b1869000SAlan Stern /*
12521da177e4SLinus Torvalds * Isochronous transfers
12531da177e4SLinus Torvalds */
1254dccf4a48SAlan Stern static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1255dccf4a48SAlan Stern struct uhci_qh *qh)
12561da177e4SLinus Torvalds {
1257dccf4a48SAlan Stern struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
1258c44b2250SAlan Stern int i;
1259c44b2250SAlan Stern unsigned frame, next;
1260dccf4a48SAlan Stern unsigned long destination, status;
1261b81d3436SAlan Stern struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
12621da177e4SLinus Torvalds
1263caf3827aSAlan Stern /* Values must not be too big (could overflow below) */
1264caf3827aSAlan Stern if (urb->interval >= UHCI_NUMFRAMES ||
1265caf3827aSAlan Stern urb->number_of_packets >= UHCI_NUMFRAMES)
1266caf3827aSAlan Stern return -EFBIG;
1267caf3827aSAlan Stern
1268c44b2250SAlan Stern uhci_get_current_frame_number(uhci);
1269c44b2250SAlan Stern
1270caf3827aSAlan Stern /* Check the period and figure out the starting frame number */
12713ca2a321SAlan Stern if (!qh->bandwidth_reserved) {
12723ca2a321SAlan Stern qh->period = urb->interval;
12733ca2a321SAlan Stern qh->phase = -1; /* Find the best phase */
12743ca2a321SAlan Stern i = uhci_check_bandwidth(uhci, qh);
12753ca2a321SAlan Stern if (i)
12763ca2a321SAlan Stern return i;
12773ca2a321SAlan Stern
12783ca2a321SAlan Stern /* Allow a little time to allocate the TDs */
1279c44b2250SAlan Stern next = uhci->frame_number + 10;
1280c44b2250SAlan Stern frame = qh->phase;
12813ca2a321SAlan Stern
1282c44b2250SAlan Stern /* Round up to the first available slot */
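/* qh->period is a power of two, so the "& -qh->period" mask rounds
 * (next - frame) up to the next multiple of the period; e.g. with a
 * gap of 5 and a period of 4, the frame number advances by 8. */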
1283c44b2250SAlan Stern frame += (next - frame + qh->period - 1) & -qh->period;
12843ca2a321SAlan Stern
1285caf3827aSAlan Stern } else if (qh->period != urb->interval) {
1286caf3827aSAlan Stern return -EINVAL; /* Can't change the period */
1287caf3827aSAlan Stern
12887898ffc5SAlan Stern } else {
1289e1944017SAlan Stern next = uhci->frame_number + 1;
1290c44b2250SAlan Stern
12917898ffc5SAlan Stern /* Find the next unused frame */
1292caf3827aSAlan Stern if (list_empty(&qh->queue)) {
1293c8155cc5SAlan Stern frame = qh->iso_frame;
1294caf3827aSAlan Stern } else {
1295caf3827aSAlan Stern struct urb *lurb;
1296caf3827aSAlan Stern
1297caf3827aSAlan Stern lurb = list_entry(qh->queue.prev,
1298caf3827aSAlan Stern struct urb_priv, node)->urb;
1299caf3827aSAlan Stern frame = lurb->start_frame +
1300caf3827aSAlan Stern lurb->number_of_packets *
1301caf3827aSAlan Stern lurb->interval;
1302caf3827aSAlan Stern }
1303c44b2250SAlan Stern
1304c44b2250SAlan Stern /* Fell behind? */
1305bef073b0SAlan Stern if (!uhci_frame_before_eq(next, frame)) {
1306c44b2250SAlan Stern
1307c44b2250SAlan Stern /* USB_ISO_ASAP: Round up to the first available slot */
1308c44b2250SAlan Stern if (urb->transfer_flags & URB_ISO_ASAP)
1309c44b2250SAlan Stern frame += (next - frame + qh->period - 1) &
1310c44b2250SAlan Stern -qh->period;
1311c44b2250SAlan Stern
1312c44b2250SAlan Stern /*
1313bef073b0SAlan Stern * Not ASAP: Use the next slot in the stream,
1314bef073b0SAlan Stern * no matter what.
13157898ffc5SAlan Stern */
1316c44b2250SAlan Stern else if (!uhci_frame_before_eq(next,
1317c44b2250SAlan Stern frame + (urb->number_of_packets - 1) *
1318c44b2250SAlan Stern qh->period))
1319bef073b0SAlan Stern dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
1320bef073b0SAlan Stern urb, frame,
1321bef073b0SAlan Stern (urb->number_of_packets - 1) *
1322bef073b0SAlan Stern qh->period,
1323bef073b0SAlan Stern next);
13247898ffc5SAlan Stern }
1325caf3827aSAlan Stern }
1326caf3827aSAlan Stern
1327caf3827aSAlan Stern /* Make sure we won't have to go too far into the future */
1328c8155cc5SAlan Stern if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
1329c44b2250SAlan Stern frame + urb->number_of_packets * urb->interval))
13300ed8fee1SAlan Stern return -EFBIG;
1331c44b2250SAlan Stern urb->start_frame = frame;
13320ed8fee1SAlan Stern
13331da177e4SLinus Torvalds status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
13341da177e4SLinus Torvalds destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
13351da177e4SLinus Torvalds
1336b81d3436SAlan Stern for (i = 0; i < urb->number_of_packets; i++) {
13372532178aSAlan Stern td = uhci_alloc_td(uhci);
13381da177e4SLinus Torvalds if (!td)
13391da177e4SLinus Torvalds return -ENOMEM;
13401da177e4SLinus Torvalds
134104538a25SAlan Stern uhci_add_td_to_urbp(td, urbp);
134251e2f62fSJan Andersson uhci_fill_td(uhci, td, status, destination |
1343dccf4a48SAlan Stern uhci_explen(urb->iso_frame_desc[i].length),
1344dccf4a48SAlan Stern urb->transfer_dma +
1345dccf4a48SAlan Stern urb->iso_frame_desc[i].offset);
1346b81d3436SAlan Stern }
13471da177e4SLinus Torvalds
1348dccf4a48SAlan Stern /* Set the interrupt-on-completion flag on the last packet. */
134951e2f62fSJan Andersson td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1350dccf4a48SAlan Stern
1351dccf4a48SAlan Stern /* Add the TDs to the frame list */
1352b81d3436SAlan Stern frame = urb->start_frame;
1353b81d3436SAlan Stern list_for_each_entry(td, &urbp->td_list, list) {
1354dccf4a48SAlan Stern uhci_insert_td_in_frame_list(uhci, td, frame);
1355c8155cc5SAlan Stern frame += qh->period;
1356c8155cc5SAlan Stern }
1357c8155cc5SAlan Stern
1358c8155cc5SAlan Stern if (list_empty(&qh->queue)) {
1359c8155cc5SAlan Stern qh->iso_packet_desc = &urb->iso_frame_desc[0];
1360c8155cc5SAlan Stern qh->iso_frame = urb->start_frame;
13611da177e4SLinus Torvalds }
13621da177e4SLinus Torvalds
136317230acdSAlan Stern qh->skel = SKEL_ISO;
13643ca2a321SAlan Stern if (!qh->bandwidth_reserved)
13653ca2a321SAlan Stern uhci_reserve_bandwidth(uhci, qh);
1366dccf4a48SAlan Stern return 0;
13671da177e4SLinus Torvalds }
13681da177e4SLinus Torvalds
13691da177e4SLinus Torvalds static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
13701da177e4SLinus Torvalds {
1371c8155cc5SAlan Stern struct uhci_td *td, *tmp;
1372c8155cc5SAlan Stern struct urb_priv *urbp = urb->hcpriv;
1373c8155cc5SAlan Stern struct uhci_qh *qh = urbp->qh;
1374c8155cc5SAlan Stern
1375c8155cc5SAlan Stern list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
1376c8155cc5SAlan Stern unsigned int ctrlstat;
13771da177e4SLinus Torvalds int status;
13781da177e4SLinus Torvalds int actlength;
13791da177e4SLinus Torvalds
1380c8155cc5SAlan Stern if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
13811da177e4SLinus Torvalds return -EINPROGRESS;
13821da177e4SLinus Torvalds
1383c8155cc5SAlan Stern uhci_remove_tds_from_frame(uhci, qh->iso_frame);
13841da177e4SLinus Torvalds
138551e2f62fSJan Andersson ctrlstat = td_status(uhci, td);
1386c8155cc5SAlan Stern if (ctrlstat & TD_CTRL_ACTIVE) {
1387c8155cc5SAlan Stern status = -EXDEV; /* TD was added too late? */
1388c8155cc5SAlan Stern } else {
13891da177e4SLinus Torvalds status = uhci_map_status(uhci_status_bits(ctrlstat),
13901da177e4SLinus Torvalds usb_pipeout(urb->pipe));
1391c8155cc5SAlan Stern actlength = uhci_actual_length(ctrlstat);
1392c8155cc5SAlan Stern
1393c8155cc5SAlan Stern urb->actual_length += actlength;
1394c8155cc5SAlan Stern qh->iso_packet_desc->actual_length = actlength;
1395c8155cc5SAlan Stern qh->iso_packet_desc->status = status;
1396c8155cc5SAlan Stern }
1397ee7d1f3fSAlan Stern if (status)
13981da177e4SLinus Torvalds urb->error_count++;
13991da177e4SLinus Torvalds
1400c8155cc5SAlan Stern uhci_remove_td_from_urbp(td);
1401c8155cc5SAlan Stern uhci_free_td(uhci, td);
1402c8155cc5SAlan Stern qh->iso_frame += qh->period;
1403c8155cc5SAlan Stern ++qh->iso_packet_desc;
14041da177e4SLinus Torvalds }
1405ee7d1f3fSAlan Stern return 0;
14061da177e4SLinus Torvalds }
14071da177e4SLinus Torvalds
14081da177e4SLinus Torvalds static int uhci_urb_enqueue(struct usb_hcd *hcd,
140955016f10SAl Viro struct urb *urb, gfp_t mem_flags)
14101da177e4SLinus Torvalds {
14111da177e4SLinus Torvalds int ret;
14121da177e4SLinus Torvalds struct uhci_hcd *uhci = hcd_to_uhci(hcd);
14131da177e4SLinus Torvalds unsigned long flags;
1414dccf4a48SAlan Stern struct urb_priv *urbp;
1415dccf4a48SAlan Stern struct uhci_qh *qh;
14161da177e4SLinus Torvalds
14171da177e4SLinus Torvalds spin_lock_irqsave(&uhci->lock, flags);
14181da177e4SLinus Torvalds
1419e9df41c5SAlan Stern ret = usb_hcd_link_urb_to_ep(hcd, urb);
1420e9df41c5SAlan Stern if (ret)
1421e9df41c5SAlan Stern goto done_not_linked;
14221da177e4SLinus Torvalds
14231da177e4SLinus Torvalds ret = -ENOMEM;
1424dccf4a48SAlan Stern urbp = uhci_alloc_urb_priv(uhci, urb);
1425dccf4a48SAlan Stern if (!urbp)
1426dccf4a48SAlan Stern goto done;
1427dccf4a48SAlan Stern
1428e9df41c5SAlan Stern if (urb->ep->hcpriv)
1429e9df41c5SAlan Stern qh = urb->ep->hcpriv;
1430dccf4a48SAlan Stern else {
1431e9df41c5SAlan Stern qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
1432dccf4a48SAlan Stern if (!qh)
1433dccf4a48SAlan Stern goto err_no_qh;
14341da177e4SLinus Torvalds }
1435dccf4a48SAlan Stern urbp->qh = qh;
14361da177e4SLinus Torvalds
14374de7d2c2SAlan Stern switch (qh->type) {
14384de7d2c2SAlan Stern case USB_ENDPOINT_XFER_CONTROL:
1439dccf4a48SAlan Stern ret = uhci_submit_control(uhci, urb, qh);
1440dccf4a48SAlan Stern break;
14414de7d2c2SAlan Stern case USB_ENDPOINT_XFER_BULK:
1442dccf4a48SAlan Stern ret = uhci_submit_bulk(uhci, urb, qh);
14431da177e4SLinus Torvalds break;
14444de7d2c2SAlan Stern case USB_ENDPOINT_XFER_INT:
1445dccf4a48SAlan Stern ret = uhci_submit_interrupt(uhci, urb, qh);
14461da177e4SLinus Torvalds break;
14474de7d2c2SAlan Stern case USB_ENDPOINT_XFER_ISOC:
1448c8155cc5SAlan Stern urb->error_count = 0;
1449dccf4a48SAlan Stern ret = uhci_submit_isochronous(uhci, urb, qh);
14501da177e4SLinus Torvalds break;
14511da177e4SLinus Torvalds }
1452dccf4a48SAlan Stern if (ret != 0)
1453dccf4a48SAlan Stern goto err_submit_failed;
14541da177e4SLinus Torvalds
1455dccf4a48SAlan Stern /* Add this URB to the QH */
1456dccf4a48SAlan Stern list_add_tail(&urbp->node, &qh->queue);
14571da177e4SLinus Torvalds
1458dccf4a48SAlan Stern /* If the new URB is the first and only one on this QH then either
1459dccf4a48SAlan Stern * the QH is new and idle or else it's unlinked and waiting to
14602775562aSAlan Stern * become idle, so we can activate it right away. But only if the
14612775562aSAlan Stern * queue isn't stopped. */
146284afddd7SAlan Stern if (qh->queue.next == &urbp->node && !qh->is_stopped) {
1463dccf4a48SAlan Stern uhci_activate_qh(uhci, qh);
1464c5e3b741SAlan Stern uhci_urbp_wants_fsbr(uhci, urbp);
146584afddd7SAlan Stern }
1466dccf4a48SAlan Stern goto done;
1467dccf4a48SAlan Stern
1468dccf4a48SAlan Stern err_submit_failed:
1469dccf4a48SAlan Stern if (qh->state == QH_STATE_IDLE)
1470dccf4a48SAlan Stern uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
1471dccf4a48SAlan Stern err_no_qh:
1472dccf4a48SAlan Stern uhci_free_urb_priv(uhci, urbp);
1473dccf4a48SAlan Stern done:
1474e9df41c5SAlan Stern if (ret)
1475e9df41c5SAlan Stern usb_hcd_unlink_urb_from_ep(hcd, urb);
1476e9df41c5SAlan Stern done_not_linked:
14771da177e4SLinus Torvalds spin_unlock_irqrestore(&uhci->lock, flags);
14781da177e4SLinus Torvalds return ret;
14791da177e4SLinus Torvalds }
14801da177e4SLinus Torvalds
1481e9df41c5SAlan Stern static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
14821da177e4SLinus Torvalds {
14831da177e4SLinus Torvalds struct uhci_hcd *uhci = hcd_to_uhci(hcd);
14841da177e4SLinus Torvalds unsigned long flags;
148510b8e47dSAlan Stern struct uhci_qh *qh;
1486e9df41c5SAlan Stern int rc;
14871da177e4SLinus Torvalds
14881da177e4SLinus Torvalds spin_lock_irqsave(&uhci->lock, flags);
1489e9df41c5SAlan Stern rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1490e9df41c5SAlan Stern if (rc)
14911da177e4SLinus Torvalds goto done;
1492e9df41c5SAlan Stern
1493e9df41c5SAlan Stern qh = ((struct urb_priv *) urb->hcpriv)->qh;
14941da177e4SLinus Torvalds
1495dccf4a48SAlan Stern /* Remove Isochronous TDs from the frame list ASAP */
149610b8e47dSAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC) {
1497dccf4a48SAlan Stern uhci_unlink_isochronous_tds(uhci, urb);
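/* Make sure the frame-list updates are visible before the frame
 * counter is sampled below. */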
149810b8e47dSAlan Stern mb();
149910b8e47dSAlan Stern
150010b8e47dSAlan Stern /* If the URB has already started, update the QH unlink time */
150110b8e47dSAlan Stern uhci_get_current_frame_number(uhci);
150210b8e47dSAlan Stern if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
150310b8e47dSAlan Stern qh->unlink_frame = uhci->frame_number;
150410b8e47dSAlan Stern }
150510b8e47dSAlan Stern
150610b8e47dSAlan Stern uhci_unlink_qh(uhci, qh);
15071da177e4SLinus Torvalds
15081da177e4SLinus Torvalds done:
15091da177e4SLinus Torvalds spin_unlock_irqrestore(&uhci->lock, flags);
1510e9df41c5SAlan Stern return rc;
15111da177e4SLinus Torvalds }
15121da177e4SLinus Torvalds
15130ed8fee1SAlan Stern /*
15140ed8fee1SAlan Stern * Finish unlinking an URB and give it back
15150ed8fee1SAlan Stern */
15160ed8fee1SAlan Stern static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
15174a00027dSAlan Stern struct urb *urb, int status)
15180ed8fee1SAlan Stern __releases(uhci->lock)
15190ed8fee1SAlan Stern __acquires(uhci->lock)
15201da177e4SLinus Torvalds {
15211da177e4SLinus Torvalds struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
15221da177e4SLinus Torvalds
1523e7e7c360SAlan Stern if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1524e7e7c360SAlan Stern
15257ea0a2bcSAlan Stern /* Subtract off the length of the SETUP packet from
15267ea0a2bcSAlan Stern * urb->actual_length.
15277ea0a2bcSAlan Stern */
15287ea0a2bcSAlan Stern urb->actual_length -= min_t(u32, 8, urb->actual_length);
1529e7e7c360SAlan Stern }
1530e7e7c360SAlan Stern
1531c8155cc5SAlan Stern /* When giving back the first URB in an Isochronous queue,
1532c8155cc5SAlan Stern * reinitialize the QH's iso-related members for the next URB. */
1533e7e7c360SAlan Stern else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
1534c8155cc5SAlan Stern urbp->node.prev == &qh->queue &&
1535c8155cc5SAlan Stern urbp->node.next != &qh->queue) {
1536c8155cc5SAlan Stern struct urb *nurb = list_entry(urbp->node.next,
1537c8155cc5SAlan Stern struct urb_priv, node)->urb;
1538c8155cc5SAlan Stern
1539c8155cc5SAlan Stern qh->iso_packet_desc = &nurb->iso_frame_desc[0];
1540c8155cc5SAlan Stern qh->iso_frame = nurb->start_frame;
1541c8155cc5SAlan Stern }
15421da177e4SLinus Torvalds
15430ed8fee1SAlan Stern /* Take the URB off the QH's queue. If the queue is now empty,
15440ed8fee1SAlan Stern * this is a perfect time for a toggle fixup. */
15450ed8fee1SAlan Stern list_del_init(&urbp->node);
15460ed8fee1SAlan Stern if (list_empty(&qh->queue) && qh->needs_fixup) {
15470ed8fee1SAlan Stern usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
15480ed8fee1SAlan Stern usb_pipeout(urb->pipe), qh->initial_toggle);
15490ed8fee1SAlan Stern qh->needs_fixup = 0;
15500ed8fee1SAlan Stern }
15510ed8fee1SAlan Stern
15520ed8fee1SAlan Stern uhci_free_urb_priv(uhci, urbp);
1553e9df41c5SAlan Stern usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
15540ed8fee1SAlan Stern
15550ed8fee1SAlan Stern spin_unlock(&uhci->lock);
15564a00027dSAlan Stern usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
15570ed8fee1SAlan Stern spin_lock(&uhci->lock);
15580ed8fee1SAlan Stern
15590ed8fee1SAlan Stern /* If the queue is now empty, we can unlink the QH and give up its
15600ed8fee1SAlan Stern * reserved bandwidth. */
15610ed8fee1SAlan Stern if (list_empty(&qh->queue)) {
15620ed8fee1SAlan Stern uhci_unlink_qh(uhci, qh);
15633ca2a321SAlan Stern if (qh->bandwidth_reserved)
15643ca2a321SAlan Stern uhci_release_bandwidth(uhci, qh);
15650ed8fee1SAlan Stern }
15660ed8fee1SAlan Stern }
15670ed8fee1SAlan Stern
15680ed8fee1SAlan Stern /*
15690ed8fee1SAlan Stern * Scan the URBs in a QH's queue
15700ed8fee1SAlan Stern */
15710ed8fee1SAlan Stern #define QH_FINISHED_UNLINKING(qh) \
15720ed8fee1SAlan Stern (qh->state == QH_STATE_UNLINKING && \
15730ed8fee1SAlan Stern uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
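/* A QH has finished unlinking once at least one frame has elapsed since
 * it was taken off the schedule (or right away if the controller is
 * stopped), so the hardware can no longer be using it. */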
15740ed8fee1SAlan Stern
15757d12e780SDavid Howells static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
15760ed8fee1SAlan Stern {
15770ed8fee1SAlan Stern struct urb_priv *urbp;
15780ed8fee1SAlan Stern struct urb *urb;
15790ed8fee1SAlan Stern int status;
15800ed8fee1SAlan Stern
15810ed8fee1SAlan Stern while (!list_empty(&qh->queue)) {
15820ed8fee1SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node);
15830ed8fee1SAlan Stern urb = urbp->urb;
15840ed8fee1SAlan Stern
1585b1869000SAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC)
15860ed8fee1SAlan Stern status = uhci_result_isochronous(uhci, urb);
1587b1869000SAlan Stern else
15880ed8fee1SAlan Stern status = uhci_result_common(uhci, urb);
15890ed8fee1SAlan Stern if (status == -EINPROGRESS)
15900ed8fee1SAlan Stern break;
15910ed8fee1SAlan Stern
15920ed8fee1SAlan Stern /* Dequeued but completed URBs can't be given back unless
15930ed8fee1SAlan Stern * the QH is stopped or has finished unlinking. */
1594eb231054SAlan Stern if (urb->unlinked) {
15952775562aSAlan Stern if (QH_FINISHED_UNLINKING(qh))
15962775562aSAlan Stern qh->is_stopped = 1;
15972775562aSAlan Stern else if (!qh->is_stopped)
15980ed8fee1SAlan Stern return;
15992775562aSAlan Stern }
16000ed8fee1SAlan Stern
16014a00027dSAlan Stern uhci_giveback_urb(uhci, qh, urb, status);
1602ee7d1f3fSAlan Stern if (status < 0)
16030ed8fee1SAlan Stern break;
16040ed8fee1SAlan Stern }
16050ed8fee1SAlan Stern
16060ed8fee1SAlan Stern /* If the QH is neither stopped nor finished unlinking (normal case),
16070ed8fee1SAlan Stern * our work here is done. */
16082775562aSAlan Stern if (QH_FINISHED_UNLINKING(qh))
16092775562aSAlan Stern qh->is_stopped = 1;
16102775562aSAlan Stern else if (!qh->is_stopped)
16110ed8fee1SAlan Stern return;
16120ed8fee1SAlan Stern
16130ed8fee1SAlan Stern /* Otherwise give back each of the dequeued URBs */
16142775562aSAlan Stern restart:
16150ed8fee1SAlan Stern list_for_each_entry(urbp, &qh->queue, node) {
16160ed8fee1SAlan Stern urb = urbp->urb;
1617eb231054SAlan Stern if (urb->unlinked) {
161810b8e47dSAlan Stern
161910b8e47dSAlan Stern /* Fix up the TD links and save the toggles for
162010b8e47dSAlan Stern * non-Isochronous queues. For Isochronous queues,
162110b8e47dSAlan Stern * test for too-recent dequeues. */
162210b8e47dSAlan Stern if (!uhci_cleanup_queue(uhci, qh, urb)) {
162310b8e47dSAlan Stern qh->is_stopped = 0;
162410b8e47dSAlan Stern return;
162510b8e47dSAlan Stern }
16264a00027dSAlan Stern uhci_giveback_urb(uhci, qh, urb, 0);
16270ed8fee1SAlan Stern goto restart;
16280ed8fee1SAlan Stern }
16290ed8fee1SAlan Stern }
16300ed8fee1SAlan Stern qh->is_stopped = 0;
16310ed8fee1SAlan Stern
16320ed8fee1SAlan Stern /* There are no more dequeued URBs. If there are still URBs on the
16330ed8fee1SAlan Stern * queue, the QH can now be re-activated. */
16340ed8fee1SAlan Stern if (!list_empty(&qh->queue)) {
16350ed8fee1SAlan Stern if (qh->needs_fixup)
163651e2f62fSJan Andersson uhci_fixup_toggles(uhci, qh, 0);
163784afddd7SAlan Stern
163884afddd7SAlan Stern /* If the first URB on the queue wants FSBR but its time
163984afddd7SAlan Stern * limit has expired, set the next TD to interrupt on
164084afddd7SAlan Stern * completion before reactivating the QH. */
164184afddd7SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node);
164284afddd7SAlan Stern if (urbp->fsbr && qh->wait_expired) {
164384afddd7SAlan Stern struct uhci_td *td = list_entry(urbp->td_list.next,
164484afddd7SAlan Stern struct uhci_td, list);
164584afddd7SAlan Stern
164651e2f62fSJan Andersson td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
164784afddd7SAlan Stern }
164884afddd7SAlan Stern
16490ed8fee1SAlan Stern uhci_activate_qh(uhci, qh);
16500ed8fee1SAlan Stern }
16510ed8fee1SAlan Stern
16520ed8fee1SAlan Stern /* The queue is empty. The QH can become idle if it is fully
16530ed8fee1SAlan Stern * unlinked. */
16540ed8fee1SAlan Stern else if (QH_FINISHED_UNLINKING(qh))
16550ed8fee1SAlan Stern uhci_make_qh_idle(uhci, qh);
16561da177e4SLinus Torvalds }
16571da177e4SLinus Torvalds
16580ed8fee1SAlan Stern /*
165984afddd7SAlan Stern * Check for queues that have made some forward progress.
166084afddd7SAlan Stern * Returns 0 if the queue is not Isochronous, is ACTIVE, and
166184afddd7SAlan Stern * has not advanced since last examined; 1 otherwise.
1662b761d9d8SAlan Stern *
1663b761d9d8SAlan Stern * Early Intel controllers have a bug which causes qh->element sometimes
1664b761d9d8SAlan Stern * not to advance when a TD completes successfully. The queue remains
1665b761d9d8SAlan Stern * stuck on the inactive completed TD. We detect such cases and advance
1666b761d9d8SAlan Stern * the element pointer by hand.
166784afddd7SAlan Stern */
166884afddd7SAlan Stern static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
166984afddd7SAlan Stern {
167084afddd7SAlan Stern struct urb_priv *urbp = NULL;
167184afddd7SAlan Stern struct uhci_td *td;
167284afddd7SAlan Stern int ret = 1;
167384afddd7SAlan Stern unsigned status;
167484afddd7SAlan Stern
167584afddd7SAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC)
1676c5e3b741SAlan Stern goto done;
167784afddd7SAlan Stern
167884afddd7SAlan Stern /* Treat an UNLINKING queue as though it hasn't advanced.
167984afddd7SAlan Stern * This is okay because reactivation will treat it as though
168084afddd7SAlan Stern * it has advanced, and if it is going to become IDLE then
168184afddd7SAlan Stern * this doesn't matter anyway. Furthermore it's possible
168284afddd7SAlan Stern * for an UNLINKING queue not to have any URBs at all, or
168384afddd7SAlan Stern * for its first URB not to have any TDs (if it was dequeued
168484afddd7SAlan Stern * just as it completed). So it's not easy in any case to
168584afddd7SAlan Stern * test whether such queues have advanced. */
168684afddd7SAlan Stern if (qh->state != QH_STATE_ACTIVE) {
168784afddd7SAlan Stern urbp = NULL;
168884afddd7SAlan Stern status = 0;
168984afddd7SAlan Stern
169084afddd7SAlan Stern } else {
169184afddd7SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node);
169284afddd7SAlan Stern td = list_entry(urbp->td_list.next, struct uhci_td, list);
169351e2f62fSJan Andersson status = td_status(uhci, td);
169484afddd7SAlan Stern if (!(status & TD_CTRL_ACTIVE)) {
169584afddd7SAlan Stern
169684afddd7SAlan Stern /* We're okay, the queue has advanced */
169784afddd7SAlan Stern qh->wait_expired = 0;
169884afddd7SAlan Stern qh->advance_jiffies = jiffies;
1699c5e3b741SAlan Stern goto done;
170084afddd7SAlan Stern }
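/* The TD is still active. If the controller is stopped, the queue
 * can't make progress on its own, so treat it as having advanced;
 * otherwise report that it hasn't moved. */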
1701ba297eddSAlan Stern ret = uhci->is_stopped;
170284afddd7SAlan Stern }
170384afddd7SAlan Stern
170484afddd7SAlan Stern /* The queue hasn't advanced; check for timeout */
1705c5e3b741SAlan Stern if (qh->wait_expired)
1706c5e3b741SAlan Stern goto done;
1707c5e3b741SAlan Stern
1708c5e3b741SAlan Stern if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
1709b761d9d8SAlan Stern
1710b761d9d8SAlan Stern /* Detect the Intel bug and work around it */
171151e2f62fSJan Andersson if (qh->post_td && qh_element(qh) ==
171251e2f62fSJan Andersson LINK_TO_TD(uhci, qh->post_td)) {
1713b761d9d8SAlan Stern qh->element = qh->post_td->link;
1714b761d9d8SAlan Stern qh->advance_jiffies = jiffies;
1715c5e3b741SAlan Stern ret = 1;
1716c5e3b741SAlan Stern goto done;
1717b761d9d8SAlan Stern }
1718b761d9d8SAlan Stern
171984afddd7SAlan Stern qh->wait_expired = 1;
172084afddd7SAlan Stern
172184afddd7SAlan Stern /* If the current URB wants FSBR, unlink it temporarily
172284afddd7SAlan Stern * so that we can safely set the next TD to interrupt on
172384afddd7SAlan Stern * completion. That way we'll know as soon as the queue
172484afddd7SAlan Stern * starts moving again. */
172584afddd7SAlan Stern if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
172684afddd7SAlan Stern uhci_unlink_qh(uhci, qh);
1727c5e3b741SAlan Stern
1728c5e3b741SAlan Stern } else {
1729c5e3b741SAlan Stern /* Unmoving but not-yet-expired queues keep FSBR alive */
1730c5e3b741SAlan Stern if (urbp)
1731c5e3b741SAlan Stern uhci_urbp_wants_fsbr(uhci, urbp);
173284afddd7SAlan Stern }
1733c5e3b741SAlan Stern
1734c5e3b741SAlan Stern done:
173584afddd7SAlan Stern return ret;
173684afddd7SAlan Stern }
173784afddd7SAlan Stern
173884afddd7SAlan Stern /*
17390ed8fee1SAlan Stern * Process events in the schedule, but only in one thread at a time
17400ed8fee1SAlan Stern */
17417d12e780SDavid Howells static void uhci_scan_schedule(struct uhci_hcd *uhci)
17421da177e4SLinus Torvalds {
17430ed8fee1SAlan Stern int i;
17440ed8fee1SAlan Stern struct uhci_qh *qh;
17451da177e4SLinus Torvalds
17461da177e4SLinus Torvalds /* Don't allow re-entrant calls */
17471da177e4SLinus Torvalds if (uhci->scan_in_progress) {
17481da177e4SLinus Torvalds uhci->need_rescan = 1;
17491da177e4SLinus Torvalds return;
17501da177e4SLinus Torvalds }
17511da177e4SLinus Torvalds uhci->scan_in_progress = 1;
17521da177e4SLinus Torvalds rescan:
17531da177e4SLinus Torvalds uhci->need_rescan = 0;
1754c5e3b741SAlan Stern uhci->fsbr_is_wanted = 0;
17551da177e4SLinus Torvalds
17566c1b445cSAlan Stern uhci_clear_next_interrupt(uhci);
17571da177e4SLinus Torvalds uhci_get_current_frame_number(uhci);
1758c8155cc5SAlan Stern uhci->cur_iso_frame = uhci->frame_number;
17591da177e4SLinus Torvalds
17600ed8fee1SAlan Stern /* Go through all the QH queues and process the URBs in each one */
17610ed8fee1SAlan Stern for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
17620ed8fee1SAlan Stern uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
17630ed8fee1SAlan Stern struct uhci_qh, node);
17640ed8fee1SAlan Stern while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
17650ed8fee1SAlan Stern uhci->next_qh = list_entry(qh->node.next,
17660ed8fee1SAlan Stern struct uhci_qh, node);
176784afddd7SAlan Stern
176884afddd7SAlan Stern if (uhci_advance_check(uhci, qh)) {
17697d12e780SDavid Howells uhci_scan_qh(uhci, qh);
1770c5e3b741SAlan Stern if (qh->state == QH_STATE_ACTIVE) {
1771c5e3b741SAlan Stern uhci_urbp_wants_fsbr(uhci,
1772c5e3b741SAlan Stern list_entry(qh->queue.next, struct urb_priv, node));
1773c5e3b741SAlan Stern }
177484afddd7SAlan Stern }
17751da177e4SLinus Torvalds }
17760ed8fee1SAlan Stern }
17771da177e4SLinus Torvalds
1778c8155cc5SAlan Stern uhci->last_iso_frame = uhci->cur_iso_frame;
17791da177e4SLinus Torvalds if (uhci->need_rescan)
17801da177e4SLinus Torvalds goto rescan;
17811da177e4SLinus Torvalds uhci->scan_in_progress = 0;
17821da177e4SLinus Torvalds
1783c5e3b741SAlan Stern if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
1784c5e3b741SAlan Stern !uhci->fsbr_expiring) {
1785c5e3b741SAlan Stern uhci->fsbr_expiring = 1;
1786c5e3b741SAlan Stern mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
1787c5e3b741SAlan Stern }
178884afddd7SAlan Stern
178904538a25SAlan Stern if (list_empty(&uhci->skel_unlink_qh->node))
17901da177e4SLinus Torvalds uhci_clear_next_interrupt(uhci);
17911da177e4SLinus Torvalds else
17921da177e4SLinus Torvalds uhci_set_next_interrupt(uhci);
17931da177e4SLinus Torvalds }
1794