11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Universal Host Controller Interface driver for USB. 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Maintainer: Alan Stern <stern@rowland.harvard.edu> 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * (C) Copyright 1999 Linus Torvalds 71da177e4SLinus Torvalds * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com 81da177e4SLinus Torvalds * (C) Copyright 1999 Randy Dunlap 91da177e4SLinus Torvalds * (C) Copyright 1999 Georg Acher, acher@in.tum.de 101da177e4SLinus Torvalds * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de 111da177e4SLinus Torvalds * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch 121da177e4SLinus Torvalds * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at 131da177e4SLinus Torvalds * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface 141da177e4SLinus Torvalds * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). 151da177e4SLinus Torvalds * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) 16b761d9d8SAlan Stern * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu 171da177e4SLinus Torvalds */ 181da177e4SLinus Torvalds 191da177e4SLinus Torvalds 201da177e4SLinus Torvalds /* 211da177e4SLinus Torvalds * Technically, updating td->status here is a race, but it's not really a 221da177e4SLinus Torvalds * problem. The worst that can happen is that we set the IOC bit again 231da177e4SLinus Torvalds * generating a spurious interrupt. We could fix this by creating another 241da177e4SLinus Torvalds * QH and leaving the IOC bit always set, but then we would have to play 251da177e4SLinus Torvalds * games with the FSBR code to make sure we get the correct order in all 261da177e4SLinus Torvalds * the cases. I don't think it's worth the effort 271da177e4SLinus Torvalds */ 28dccf4a48SAlan Stern static void uhci_set_next_interrupt(struct uhci_hcd *uhci) 291da177e4SLinus Torvalds { 306c1b445cSAlan Stern if (uhci->is_stopped) 311f09df8bSAlan Stern mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); 321da177e4SLinus Torvalds uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 331da177e4SLinus Torvalds } 341da177e4SLinus Torvalds 351da177e4SLinus Torvalds static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci) 361da177e4SLinus Torvalds { 371da177e4SLinus Torvalds uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC); 381da177e4SLinus Torvalds } 391da177e4SLinus Torvalds 4084afddd7SAlan Stern 4184afddd7SAlan Stern /* 4284afddd7SAlan Stern * Full-Speed Bandwidth Reclamation (FSBR). 4384afddd7SAlan Stern * We turn on FSBR whenever a queue that wants it is advancing, 4484afddd7SAlan Stern * and leave it on for a short time thereafter. 4584afddd7SAlan Stern */ 4684afddd7SAlan Stern static void uhci_fsbr_on(struct uhci_hcd *uhci) 4784afddd7SAlan Stern { 4884afddd7SAlan Stern uhci->fsbr_is_on = 1; 4984afddd7SAlan Stern uhci->skel_term_qh->link = cpu_to_le32( 5084afddd7SAlan Stern uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH; 5184afddd7SAlan Stern } 5284afddd7SAlan Stern 5384afddd7SAlan Stern static void uhci_fsbr_off(struct uhci_hcd *uhci) 5484afddd7SAlan Stern { 5584afddd7SAlan Stern uhci->fsbr_is_on = 0; 5684afddd7SAlan Stern uhci->skel_term_qh->link = UHCI_PTR_TERM; 5784afddd7SAlan Stern } 5884afddd7SAlan Stern 5984afddd7SAlan Stern static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) 6084afddd7SAlan Stern { 6184afddd7SAlan Stern struct urb_priv *urbp = urb->hcpriv; 6284afddd7SAlan Stern 6384afddd7SAlan Stern if (!(urb->transfer_flags & URB_NO_FSBR)) 6484afddd7SAlan Stern urbp->fsbr = 1; 6584afddd7SAlan Stern } 6684afddd7SAlan Stern 67c5e3b741SAlan Stern static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp) 6884afddd7SAlan Stern { 6984afddd7SAlan Stern if (urbp->fsbr) { 70c5e3b741SAlan Stern uhci->fsbr_is_wanted = 1; 7184afddd7SAlan Stern if (!uhci->fsbr_is_on) 7284afddd7SAlan Stern uhci_fsbr_on(uhci); 73c5e3b741SAlan Stern else if (uhci->fsbr_expiring) { 74c5e3b741SAlan Stern uhci->fsbr_expiring = 0; 75c5e3b741SAlan Stern del_timer(&uhci->fsbr_timer); 7684afddd7SAlan Stern } 7784afddd7SAlan Stern } 78c5e3b741SAlan Stern } 79c5e3b741SAlan Stern 80c5e3b741SAlan Stern static void uhci_fsbr_timeout(unsigned long _uhci) 81c5e3b741SAlan Stern { 82c5e3b741SAlan Stern struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci; 83c5e3b741SAlan Stern unsigned long flags; 84c5e3b741SAlan Stern 85c5e3b741SAlan Stern spin_lock_irqsave(&uhci->lock, flags); 86c5e3b741SAlan Stern if (uhci->fsbr_expiring) { 87c5e3b741SAlan Stern uhci->fsbr_expiring = 0; 88c5e3b741SAlan Stern uhci_fsbr_off(uhci); 89c5e3b741SAlan Stern } 90c5e3b741SAlan Stern spin_unlock_irqrestore(&uhci->lock, flags); 91c5e3b741SAlan Stern } 9284afddd7SAlan Stern 9384afddd7SAlan Stern 942532178aSAlan Stern static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) 951da177e4SLinus Torvalds { 961da177e4SLinus Torvalds dma_addr_t dma_handle; 971da177e4SLinus Torvalds struct uhci_td *td; 981da177e4SLinus Torvalds 991da177e4SLinus Torvalds td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle); 1001da177e4SLinus Torvalds if (!td) 1011da177e4SLinus Torvalds return NULL; 1021da177e4SLinus Torvalds 1031da177e4SLinus Torvalds td->dma_handle = dma_handle; 1041da177e4SLinus Torvalds td->frame = -1; 1051da177e4SLinus Torvalds 1061da177e4SLinus Torvalds INIT_LIST_HEAD(&td->list); 1071da177e4SLinus Torvalds INIT_LIST_HEAD(&td->fl_list); 1081da177e4SLinus Torvalds 1091da177e4SLinus Torvalds return td; 1101da177e4SLinus Torvalds } 1111da177e4SLinus Torvalds 112dccf4a48SAlan Stern static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) 113dccf4a48SAlan Stern { 114dccf4a48SAlan Stern if (!list_empty(&td->list)) 115dccf4a48SAlan Stern dev_warn(uhci_dev(uhci), "td %p still in list!\n", td); 116dccf4a48SAlan Stern if (!list_empty(&td->fl_list)) 117dccf4a48SAlan Stern dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td); 118dccf4a48SAlan Stern 119dccf4a48SAlan Stern dma_pool_free(uhci->td_pool, td, td->dma_handle); 120dccf4a48SAlan Stern } 121dccf4a48SAlan Stern 1221da177e4SLinus Torvalds static inline void uhci_fill_td(struct uhci_td *td, u32 status, 1231da177e4SLinus Torvalds u32 token, u32 buffer) 1241da177e4SLinus Torvalds { 1251da177e4SLinus Torvalds td->status = cpu_to_le32(status); 1261da177e4SLinus Torvalds td->token = cpu_to_le32(token); 1271da177e4SLinus Torvalds td->buffer = cpu_to_le32(buffer); 1281da177e4SLinus Torvalds } 1291da177e4SLinus Torvalds 13004538a25SAlan Stern static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp) 13104538a25SAlan Stern { 13204538a25SAlan Stern list_add_tail(&td->list, &urbp->td_list); 13304538a25SAlan Stern } 13404538a25SAlan Stern 13504538a25SAlan Stern static void uhci_remove_td_from_urbp(struct uhci_td *td) 13604538a25SAlan Stern { 13704538a25SAlan Stern list_del_init(&td->list); 13804538a25SAlan Stern } 13904538a25SAlan Stern 1401da177e4SLinus Torvalds /* 141687f5f34SAlan Stern * We insert Isochronous URBs directly into the frame list at the beginning 1421da177e4SLinus Torvalds */ 143dccf4a48SAlan Stern static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci, 144dccf4a48SAlan Stern struct uhci_td *td, unsigned framenum) 1451da177e4SLinus Torvalds { 1461da177e4SLinus Torvalds framenum &= (UHCI_NUMFRAMES - 1); 1471da177e4SLinus Torvalds 1481da177e4SLinus Torvalds td->frame = framenum; 1491da177e4SLinus Torvalds 1501da177e4SLinus Torvalds /* Is there a TD already mapped there? */ 151a1d59ce8SAlan Stern if (uhci->frame_cpu[framenum]) { 1521da177e4SLinus Torvalds struct uhci_td *ftd, *ltd; 1531da177e4SLinus Torvalds 154a1d59ce8SAlan Stern ftd = uhci->frame_cpu[framenum]; 1551da177e4SLinus Torvalds ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list); 1561da177e4SLinus Torvalds 1571da177e4SLinus Torvalds list_add_tail(&td->fl_list, &ftd->fl_list); 1581da177e4SLinus Torvalds 1591da177e4SLinus Torvalds td->link = ltd->link; 1601da177e4SLinus Torvalds wmb(); 1611da177e4SLinus Torvalds ltd->link = cpu_to_le32(td->dma_handle); 1621da177e4SLinus Torvalds } else { 163a1d59ce8SAlan Stern td->link = uhci->frame[framenum]; 1641da177e4SLinus Torvalds wmb(); 165a1d59ce8SAlan Stern uhci->frame[framenum] = cpu_to_le32(td->dma_handle); 166a1d59ce8SAlan Stern uhci->frame_cpu[framenum] = td; 1671da177e4SLinus Torvalds } 1681da177e4SLinus Torvalds } 1691da177e4SLinus Torvalds 170dccf4a48SAlan Stern static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, 171b81d3436SAlan Stern struct uhci_td *td) 1721da177e4SLinus Torvalds { 1731da177e4SLinus Torvalds /* If it's not inserted, don't remove it */ 174b81d3436SAlan Stern if (td->frame == -1) { 175b81d3436SAlan Stern WARN_ON(!list_empty(&td->fl_list)); 1761da177e4SLinus Torvalds return; 177b81d3436SAlan Stern } 1781da177e4SLinus Torvalds 179b81d3436SAlan Stern if (uhci->frame_cpu[td->frame] == td) { 1801da177e4SLinus Torvalds if (list_empty(&td->fl_list)) { 181a1d59ce8SAlan Stern uhci->frame[td->frame] = td->link; 182a1d59ce8SAlan Stern uhci->frame_cpu[td->frame] = NULL; 1831da177e4SLinus Torvalds } else { 1841da177e4SLinus Torvalds struct uhci_td *ntd; 1851da177e4SLinus Torvalds 1861da177e4SLinus Torvalds ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list); 187a1d59ce8SAlan Stern uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle); 188a1d59ce8SAlan Stern uhci->frame_cpu[td->frame] = ntd; 1891da177e4SLinus Torvalds } 1901da177e4SLinus Torvalds } else { 1911da177e4SLinus Torvalds struct uhci_td *ptd; 1921da177e4SLinus Torvalds 1931da177e4SLinus Torvalds ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list); 1941da177e4SLinus Torvalds ptd->link = td->link; 1951da177e4SLinus Torvalds } 1961da177e4SLinus Torvalds 1971da177e4SLinus Torvalds list_del_init(&td->fl_list); 1981da177e4SLinus Torvalds td->frame = -1; 1991da177e4SLinus Torvalds } 2001da177e4SLinus Torvalds 201c8155cc5SAlan Stern static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci, 202c8155cc5SAlan Stern unsigned int framenum) 203c8155cc5SAlan Stern { 204c8155cc5SAlan Stern struct uhci_td *ftd, *ltd; 205c8155cc5SAlan Stern 206c8155cc5SAlan Stern framenum &= (UHCI_NUMFRAMES - 1); 207c8155cc5SAlan Stern 208c8155cc5SAlan Stern ftd = uhci->frame_cpu[framenum]; 209c8155cc5SAlan Stern if (ftd) { 210c8155cc5SAlan Stern ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list); 211c8155cc5SAlan Stern uhci->frame[framenum] = ltd->link; 212c8155cc5SAlan Stern uhci->frame_cpu[framenum] = NULL; 213c8155cc5SAlan Stern 214c8155cc5SAlan Stern while (!list_empty(&ftd->fl_list)) 215c8155cc5SAlan Stern list_del_init(ftd->fl_list.prev); 216c8155cc5SAlan Stern } 217c8155cc5SAlan Stern } 218c8155cc5SAlan Stern 219dccf4a48SAlan Stern /* 220dccf4a48SAlan Stern * Remove all the TDs for an Isochronous URB from the frame list 221dccf4a48SAlan Stern */ 222dccf4a48SAlan Stern static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb) 223b81d3436SAlan Stern { 224b81d3436SAlan Stern struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; 225b81d3436SAlan Stern struct uhci_td *td; 226b81d3436SAlan Stern 227b81d3436SAlan Stern list_for_each_entry(td, &urbp->td_list, list) 228dccf4a48SAlan Stern uhci_remove_td_from_frame_list(uhci, td); 229b81d3436SAlan Stern } 230b81d3436SAlan Stern 231dccf4a48SAlan Stern static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, 232dccf4a48SAlan Stern struct usb_device *udev, struct usb_host_endpoint *hep) 2331da177e4SLinus Torvalds { 2341da177e4SLinus Torvalds dma_addr_t dma_handle; 2351da177e4SLinus Torvalds struct uhci_qh *qh; 2361da177e4SLinus Torvalds 2371da177e4SLinus Torvalds qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); 2381da177e4SLinus Torvalds if (!qh) 2391da177e4SLinus Torvalds return NULL; 2401da177e4SLinus Torvalds 24159e29ed9SAlan Stern memset(qh, 0, sizeof(*qh)); 2421da177e4SLinus Torvalds qh->dma_handle = dma_handle; 2431da177e4SLinus Torvalds 2441da177e4SLinus Torvalds qh->element = UHCI_PTR_TERM; 2451da177e4SLinus Torvalds qh->link = UHCI_PTR_TERM; 2461da177e4SLinus Torvalds 247dccf4a48SAlan Stern INIT_LIST_HEAD(&qh->queue); 248dccf4a48SAlan Stern INIT_LIST_HEAD(&qh->node); 2491da177e4SLinus Torvalds 250dccf4a48SAlan Stern if (udev) { /* Normal QH */ 25185a975d0SAlan Stern qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 25285a975d0SAlan Stern if (qh->type != USB_ENDPOINT_XFER_ISOC) { 253af0bb599SAlan Stern qh->dummy_td = uhci_alloc_td(uhci); 254af0bb599SAlan Stern if (!qh->dummy_td) { 255af0bb599SAlan Stern dma_pool_free(uhci->qh_pool, qh, dma_handle); 256af0bb599SAlan Stern return NULL; 257af0bb599SAlan Stern } 25885a975d0SAlan Stern } 259dccf4a48SAlan Stern qh->state = QH_STATE_IDLE; 260dccf4a48SAlan Stern qh->hep = hep; 261dccf4a48SAlan Stern qh->udev = udev; 262dccf4a48SAlan Stern hep->hcpriv = qh; 2631da177e4SLinus Torvalds 2643ca2a321SAlan Stern if (qh->type == USB_ENDPOINT_XFER_INT || 2653ca2a321SAlan Stern qh->type == USB_ENDPOINT_XFER_ISOC) 2663ca2a321SAlan Stern qh->load = usb_calc_bus_time(udev->speed, 2673ca2a321SAlan Stern usb_endpoint_dir_in(&hep->desc), 2683ca2a321SAlan Stern qh->type == USB_ENDPOINT_XFER_ISOC, 2693ca2a321SAlan Stern le16_to_cpu(hep->desc.wMaxPacketSize)) 2703ca2a321SAlan Stern / 1000 + 1; 2713ca2a321SAlan Stern 272dccf4a48SAlan Stern } else { /* Skeleton QH */ 273dccf4a48SAlan Stern qh->state = QH_STATE_ACTIVE; 2744de7d2c2SAlan Stern qh->type = -1; 275dccf4a48SAlan Stern } 2761da177e4SLinus Torvalds return qh; 2771da177e4SLinus Torvalds } 2781da177e4SLinus Torvalds 2791da177e4SLinus Torvalds static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) 2801da177e4SLinus Torvalds { 281dccf4a48SAlan Stern WARN_ON(qh->state != QH_STATE_IDLE && qh->udev); 282dccf4a48SAlan Stern if (!list_empty(&qh->queue)) 2831da177e4SLinus Torvalds dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh); 2841da177e4SLinus Torvalds 285dccf4a48SAlan Stern list_del(&qh->node); 286dccf4a48SAlan Stern if (qh->udev) { 287dccf4a48SAlan Stern qh->hep->hcpriv = NULL; 28885a975d0SAlan Stern if (qh->dummy_td) 289af0bb599SAlan Stern uhci_free_td(uhci, qh->dummy_td); 290dccf4a48SAlan Stern } 2911da177e4SLinus Torvalds dma_pool_free(uhci->qh_pool, qh, qh->dma_handle); 2921da177e4SLinus Torvalds } 2931da177e4SLinus Torvalds 2941da177e4SLinus Torvalds /* 295a0b458b6SAlan Stern * When a queue is stopped and a dequeued URB is given back, adjust 296a0b458b6SAlan Stern * the previous TD link (if the URB isn't first on the queue) or 297a0b458b6SAlan Stern * save its toggle value (if it is first and is currently executing). 29810b8e47dSAlan Stern * 29910b8e47dSAlan Stern * Returns 0 if the URB should not yet be given back, 1 otherwise. 3000ed8fee1SAlan Stern */ 30110b8e47dSAlan Stern static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, 302a0b458b6SAlan Stern struct urb *urb) 3030ed8fee1SAlan Stern { 304a0b458b6SAlan Stern struct urb_priv *urbp = urb->hcpriv; 3050ed8fee1SAlan Stern struct uhci_td *td; 30610b8e47dSAlan Stern int ret = 1; 3070ed8fee1SAlan Stern 308a0b458b6SAlan Stern /* Isochronous pipes don't use toggles and their TD link pointers 30910b8e47dSAlan Stern * get adjusted during uhci_urb_dequeue(). But since their queues 31010b8e47dSAlan Stern * cannot truly be stopped, we have to watch out for dequeues 31110b8e47dSAlan Stern * occurring after the nominal unlink frame. */ 31210b8e47dSAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC) { 31310b8e47dSAlan Stern ret = (uhci->frame_number + uhci->is_stopped != 31410b8e47dSAlan Stern qh->unlink_frame); 315c5e3b741SAlan Stern goto done; 31610b8e47dSAlan Stern } 317a0b458b6SAlan Stern 318a0b458b6SAlan Stern /* If the URB isn't first on its queue, adjust the link pointer 319a0b458b6SAlan Stern * of the last TD in the previous URB. The toggle doesn't need 320a0b458b6SAlan Stern * to be saved since this URB can't be executing yet. */ 321a0b458b6SAlan Stern if (qh->queue.next != &urbp->node) { 322a0b458b6SAlan Stern struct urb_priv *purbp; 323a0b458b6SAlan Stern struct uhci_td *ptd; 324a0b458b6SAlan Stern 325a0b458b6SAlan Stern purbp = list_entry(urbp->node.prev, struct urb_priv, node); 326a0b458b6SAlan Stern WARN_ON(list_empty(&purbp->td_list)); 327a0b458b6SAlan Stern ptd = list_entry(purbp->td_list.prev, struct uhci_td, 328a0b458b6SAlan Stern list); 329a0b458b6SAlan Stern td = list_entry(urbp->td_list.prev, struct uhci_td, 330a0b458b6SAlan Stern list); 331a0b458b6SAlan Stern ptd->link = td->link; 332c5e3b741SAlan Stern goto done; 333a0b458b6SAlan Stern } 334a0b458b6SAlan Stern 3350ed8fee1SAlan Stern /* If the QH element pointer is UHCI_PTR_TERM then then currently 3360ed8fee1SAlan Stern * executing URB has already been unlinked, so this one isn't it. */ 337a0b458b6SAlan Stern if (qh_element(qh) == UHCI_PTR_TERM) 338c5e3b741SAlan Stern goto done; 3390ed8fee1SAlan Stern qh->element = UHCI_PTR_TERM; 3400ed8fee1SAlan Stern 34185a975d0SAlan Stern /* Control pipes don't have to worry about toggles */ 342a0b458b6SAlan Stern if (qh->type == USB_ENDPOINT_XFER_CONTROL) 343c5e3b741SAlan Stern goto done; 3440ed8fee1SAlan Stern 345a0b458b6SAlan Stern /* Save the next toggle value */ 34659e29ed9SAlan Stern WARN_ON(list_empty(&urbp->td_list)); 34759e29ed9SAlan Stern td = list_entry(urbp->td_list.next, struct uhci_td, list); 3480ed8fee1SAlan Stern qh->needs_fixup = 1; 3490ed8fee1SAlan Stern qh->initial_toggle = uhci_toggle(td_token(td)); 350c5e3b741SAlan Stern 351c5e3b741SAlan Stern done: 35210b8e47dSAlan Stern return ret; 3530ed8fee1SAlan Stern } 3540ed8fee1SAlan Stern 3550ed8fee1SAlan Stern /* 3560ed8fee1SAlan Stern * Fix up the data toggles for URBs in a queue, when one of them 3570ed8fee1SAlan Stern * terminates early (short transfer, error, or dequeued). 3580ed8fee1SAlan Stern */ 3590ed8fee1SAlan Stern static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) 3600ed8fee1SAlan Stern { 3610ed8fee1SAlan Stern struct urb_priv *urbp = NULL; 3620ed8fee1SAlan Stern struct uhci_td *td; 3630ed8fee1SAlan Stern unsigned int toggle = qh->initial_toggle; 3640ed8fee1SAlan Stern unsigned int pipe; 3650ed8fee1SAlan Stern 3660ed8fee1SAlan Stern /* Fixups for a short transfer start with the second URB in the 3670ed8fee1SAlan Stern * queue (the short URB is the first). */ 3680ed8fee1SAlan Stern if (skip_first) 3690ed8fee1SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node); 3700ed8fee1SAlan Stern 3710ed8fee1SAlan Stern /* When starting with the first URB, if the QH element pointer is 3720ed8fee1SAlan Stern * still valid then we know the URB's toggles are okay. */ 3730ed8fee1SAlan Stern else if (qh_element(qh) != UHCI_PTR_TERM) 3740ed8fee1SAlan Stern toggle = 2; 3750ed8fee1SAlan Stern 3760ed8fee1SAlan Stern /* Fix up the toggle for the URBs in the queue. Normally this 3770ed8fee1SAlan Stern * loop won't run more than once: When an error or short transfer 3780ed8fee1SAlan Stern * occurs, the queue usually gets emptied. */ 3791393adb2SAlan Stern urbp = list_prepare_entry(urbp, &qh->queue, node); 3800ed8fee1SAlan Stern list_for_each_entry_continue(urbp, &qh->queue, node) { 3810ed8fee1SAlan Stern 3820ed8fee1SAlan Stern /* If the first TD has the right toggle value, we don't 3830ed8fee1SAlan Stern * need to change any toggles in this URB */ 3840ed8fee1SAlan Stern td = list_entry(urbp->td_list.next, struct uhci_td, list); 3850ed8fee1SAlan Stern if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) { 386db59b464SAlan Stern td = list_entry(urbp->td_list.prev, struct uhci_td, 3870ed8fee1SAlan Stern list); 3880ed8fee1SAlan Stern toggle = uhci_toggle(td_token(td)) ^ 1; 3890ed8fee1SAlan Stern 3900ed8fee1SAlan Stern /* Otherwise all the toggles in the URB have to be switched */ 3910ed8fee1SAlan Stern } else { 3920ed8fee1SAlan Stern list_for_each_entry(td, &urbp->td_list, list) { 3930ed8fee1SAlan Stern td->token ^= __constant_cpu_to_le32( 3940ed8fee1SAlan Stern TD_TOKEN_TOGGLE); 3950ed8fee1SAlan Stern toggle ^= 1; 3960ed8fee1SAlan Stern } 3970ed8fee1SAlan Stern } 3980ed8fee1SAlan Stern } 3990ed8fee1SAlan Stern 4000ed8fee1SAlan Stern wmb(); 4010ed8fee1SAlan Stern pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe; 4020ed8fee1SAlan Stern usb_settoggle(qh->udev, usb_pipeendpoint(pipe), 4030ed8fee1SAlan Stern usb_pipeout(pipe), toggle); 4040ed8fee1SAlan Stern qh->needs_fixup = 0; 4050ed8fee1SAlan Stern } 4060ed8fee1SAlan Stern 4070ed8fee1SAlan Stern /* 408dccf4a48SAlan Stern * Put a QH on the schedule in both hardware and software 4091da177e4SLinus Torvalds */ 410dccf4a48SAlan Stern static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) 4111da177e4SLinus Torvalds { 4121da177e4SLinus Torvalds struct uhci_qh *pqh; 4131da177e4SLinus Torvalds 414dccf4a48SAlan Stern WARN_ON(list_empty(&qh->queue)); 415dccf4a48SAlan Stern 416dccf4a48SAlan Stern /* Set the element pointer if it isn't set already. 417dccf4a48SAlan Stern * This isn't needed for Isochronous queues, but it doesn't hurt. */ 418dccf4a48SAlan Stern if (qh_element(qh) == UHCI_PTR_TERM) { 419dccf4a48SAlan Stern struct urb_priv *urbp = list_entry(qh->queue.next, 420dccf4a48SAlan Stern struct urb_priv, node); 421dccf4a48SAlan Stern struct uhci_td *td = list_entry(urbp->td_list.next, 422dccf4a48SAlan Stern struct uhci_td, list); 423dccf4a48SAlan Stern 424dccf4a48SAlan Stern qh->element = cpu_to_le32(td->dma_handle); 425dccf4a48SAlan Stern } 426dccf4a48SAlan Stern 42784afddd7SAlan Stern /* Treat the queue as if it has just advanced */ 42884afddd7SAlan Stern qh->wait_expired = 0; 42984afddd7SAlan Stern qh->advance_jiffies = jiffies; 43084afddd7SAlan Stern 431dccf4a48SAlan Stern if (qh->state == QH_STATE_ACTIVE) 4321da177e4SLinus Torvalds return; 433dccf4a48SAlan Stern qh->state = QH_STATE_ACTIVE; 434dccf4a48SAlan Stern 435dccf4a48SAlan Stern /* Move the QH from its old list to the end of the appropriate 436dccf4a48SAlan Stern * skeleton's list */ 4370ed8fee1SAlan Stern if (qh == uhci->next_qh) 4380ed8fee1SAlan Stern uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, 4390ed8fee1SAlan Stern node); 440dccf4a48SAlan Stern list_move_tail(&qh->node, &qh->skel->node); 441dccf4a48SAlan Stern 442dccf4a48SAlan Stern /* Link it into the schedule */ 443dccf4a48SAlan Stern pqh = list_entry(qh->node.prev, struct uhci_qh, node); 444dccf4a48SAlan Stern qh->link = pqh->link; 445dccf4a48SAlan Stern wmb(); 446dccf4a48SAlan Stern pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle); 447dccf4a48SAlan Stern } 4481da177e4SLinus Torvalds 4491da177e4SLinus Torvalds /* 450dccf4a48SAlan Stern * Take a QH off the hardware schedule 4511da177e4SLinus Torvalds */ 452dccf4a48SAlan Stern static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) 453dccf4a48SAlan Stern { 454dccf4a48SAlan Stern struct uhci_qh *pqh; 4551da177e4SLinus Torvalds 456dccf4a48SAlan Stern if (qh->state == QH_STATE_UNLINKING) 457dccf4a48SAlan Stern return; 458dccf4a48SAlan Stern WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev); 459dccf4a48SAlan Stern qh->state = QH_STATE_UNLINKING; 4601da177e4SLinus Torvalds 461dccf4a48SAlan Stern /* Unlink the QH from the schedule and record when we did it */ 462dccf4a48SAlan Stern pqh = list_entry(qh->node.prev, struct uhci_qh, node); 463dccf4a48SAlan Stern pqh->link = qh->link; 464dccf4a48SAlan Stern mb(); 4651da177e4SLinus Torvalds 4661da177e4SLinus Torvalds uhci_get_current_frame_number(uhci); 467dccf4a48SAlan Stern qh->unlink_frame = uhci->frame_number; 4681da177e4SLinus Torvalds 469dccf4a48SAlan Stern /* Force an interrupt so we know when the QH is fully unlinked */ 470dccf4a48SAlan Stern if (list_empty(&uhci->skel_unlink_qh->node)) 4711da177e4SLinus Torvalds uhci_set_next_interrupt(uhci); 4721da177e4SLinus Torvalds 473dccf4a48SAlan Stern /* Move the QH from its old list to the end of the unlinking list */ 4740ed8fee1SAlan Stern if (qh == uhci->next_qh) 4750ed8fee1SAlan Stern uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, 4760ed8fee1SAlan Stern node); 477dccf4a48SAlan Stern list_move_tail(&qh->node, &uhci->skel_unlink_qh->node); 4781da177e4SLinus Torvalds } 4791da177e4SLinus Torvalds 4801da177e4SLinus Torvalds /* 481dccf4a48SAlan Stern * When we and the controller are through with a QH, it becomes IDLE. 482dccf4a48SAlan Stern * This happens when a QH has been off the schedule (on the unlinking 483dccf4a48SAlan Stern * list) for more than one frame, or when an error occurs while adding 484dccf4a48SAlan Stern * the first URB onto a new QH. 4851da177e4SLinus Torvalds */ 486dccf4a48SAlan Stern static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh) 487dccf4a48SAlan Stern { 488dccf4a48SAlan Stern WARN_ON(qh->state == QH_STATE_ACTIVE); 489dccf4a48SAlan Stern 4900ed8fee1SAlan Stern if (qh == uhci->next_qh) 4910ed8fee1SAlan Stern uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, 4920ed8fee1SAlan Stern node); 493dccf4a48SAlan Stern list_move(&qh->node, &uhci->idle_qh_list); 494dccf4a48SAlan Stern qh->state = QH_STATE_IDLE; 495dccf4a48SAlan Stern 49659e29ed9SAlan Stern /* Now that the QH is idle, its post_td isn't being used */ 49759e29ed9SAlan Stern if (qh->post_td) { 49859e29ed9SAlan Stern uhci_free_td(uhci, qh->post_td); 49959e29ed9SAlan Stern qh->post_td = NULL; 50059e29ed9SAlan Stern } 50159e29ed9SAlan Stern 502dccf4a48SAlan Stern /* If anyone is waiting for a QH to become idle, wake them up */ 503dccf4a48SAlan Stern if (uhci->num_waiting) 504dccf4a48SAlan Stern wake_up_all(&uhci->waitqh); 5051da177e4SLinus Torvalds } 5061da177e4SLinus Torvalds 5073ca2a321SAlan Stern /* 5083ca2a321SAlan Stern * Find the highest existing bandwidth load for a given phase and period. 5093ca2a321SAlan Stern */ 5103ca2a321SAlan Stern static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period) 5113ca2a321SAlan Stern { 5123ca2a321SAlan Stern int highest_load = uhci->load[phase]; 5133ca2a321SAlan Stern 5143ca2a321SAlan Stern for (phase += period; phase < MAX_PHASE; phase += period) 5153ca2a321SAlan Stern highest_load = max_t(int, highest_load, uhci->load[phase]); 5163ca2a321SAlan Stern return highest_load; 5173ca2a321SAlan Stern } 5183ca2a321SAlan Stern 5193ca2a321SAlan Stern /* 5203ca2a321SAlan Stern * Set qh->phase to the optimal phase for a periodic transfer and 5213ca2a321SAlan Stern * check whether the bandwidth requirement is acceptable. 5223ca2a321SAlan Stern */ 5233ca2a321SAlan Stern static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) 5243ca2a321SAlan Stern { 5253ca2a321SAlan Stern int minimax_load; 5263ca2a321SAlan Stern 5273ca2a321SAlan Stern /* Find the optimal phase (unless it is already set) and get 5283ca2a321SAlan Stern * its load value. */ 5293ca2a321SAlan Stern if (qh->phase >= 0) 5303ca2a321SAlan Stern minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); 5313ca2a321SAlan Stern else { 5323ca2a321SAlan Stern int phase, load; 5333ca2a321SAlan Stern int max_phase = min_t(int, MAX_PHASE, qh->period); 5343ca2a321SAlan Stern 5353ca2a321SAlan Stern qh->phase = 0; 5363ca2a321SAlan Stern minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); 5373ca2a321SAlan Stern for (phase = 1; phase < max_phase; ++phase) { 5383ca2a321SAlan Stern load = uhci_highest_load(uhci, phase, qh->period); 5393ca2a321SAlan Stern if (load < minimax_load) { 5403ca2a321SAlan Stern minimax_load = load; 5413ca2a321SAlan Stern qh->phase = phase; 5423ca2a321SAlan Stern } 5433ca2a321SAlan Stern } 5443ca2a321SAlan Stern } 5453ca2a321SAlan Stern 5463ca2a321SAlan Stern /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */ 5473ca2a321SAlan Stern if (minimax_load + qh->load > 900) { 5483ca2a321SAlan Stern dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: " 5493ca2a321SAlan Stern "period %d, phase %d, %d + %d us\n", 5503ca2a321SAlan Stern qh->period, qh->phase, minimax_load, qh->load); 5513ca2a321SAlan Stern return -ENOSPC; 5523ca2a321SAlan Stern } 5533ca2a321SAlan Stern return 0; 5543ca2a321SAlan Stern } 5553ca2a321SAlan Stern 5563ca2a321SAlan Stern /* 5573ca2a321SAlan Stern * Reserve a periodic QH's bandwidth in the schedule 5583ca2a321SAlan Stern */ 5593ca2a321SAlan Stern static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) 5603ca2a321SAlan Stern { 5613ca2a321SAlan Stern int i; 5623ca2a321SAlan Stern int load = qh->load; 5633ca2a321SAlan Stern char *p = "??"; 5643ca2a321SAlan Stern 5653ca2a321SAlan Stern for (i = qh->phase; i < MAX_PHASE; i += qh->period) { 5663ca2a321SAlan Stern uhci->load[i] += load; 5673ca2a321SAlan Stern uhci->total_load += load; 5683ca2a321SAlan Stern } 5693ca2a321SAlan Stern uhci_to_hcd(uhci)->self.bandwidth_allocated = 5703ca2a321SAlan Stern uhci->total_load / MAX_PHASE; 5713ca2a321SAlan Stern switch (qh->type) { 5723ca2a321SAlan Stern case USB_ENDPOINT_XFER_INT: 5733ca2a321SAlan Stern ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs; 5743ca2a321SAlan Stern p = "INT"; 5753ca2a321SAlan Stern break; 5763ca2a321SAlan Stern case USB_ENDPOINT_XFER_ISOC: 5773ca2a321SAlan Stern ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs; 5783ca2a321SAlan Stern p = "ISO"; 5793ca2a321SAlan Stern break; 5803ca2a321SAlan Stern } 5813ca2a321SAlan Stern qh->bandwidth_reserved = 1; 5823ca2a321SAlan Stern dev_dbg(uhci_dev(uhci), 5833ca2a321SAlan Stern "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", 5843ca2a321SAlan Stern "reserve", qh->udev->devnum, 5853ca2a321SAlan Stern qh->hep->desc.bEndpointAddress, p, 5863ca2a321SAlan Stern qh->period, qh->phase, load); 5873ca2a321SAlan Stern } 5883ca2a321SAlan Stern 5893ca2a321SAlan Stern /* 5903ca2a321SAlan Stern * Release a periodic QH's bandwidth reservation 5913ca2a321SAlan Stern */ 5923ca2a321SAlan Stern static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) 5933ca2a321SAlan Stern { 5943ca2a321SAlan Stern int i; 5953ca2a321SAlan Stern int load = qh->load; 5963ca2a321SAlan Stern char *p = "??"; 5973ca2a321SAlan Stern 5983ca2a321SAlan Stern for (i = qh->phase; i < MAX_PHASE; i += qh->period) { 5993ca2a321SAlan Stern uhci->load[i] -= load; 6003ca2a321SAlan Stern uhci->total_load -= load; 6013ca2a321SAlan Stern } 6023ca2a321SAlan Stern uhci_to_hcd(uhci)->self.bandwidth_allocated = 6033ca2a321SAlan Stern uhci->total_load / MAX_PHASE; 6043ca2a321SAlan Stern switch (qh->type) { 6053ca2a321SAlan Stern case USB_ENDPOINT_XFER_INT: 6063ca2a321SAlan Stern --uhci_to_hcd(uhci)->self.bandwidth_int_reqs; 6073ca2a321SAlan Stern p = "INT"; 6083ca2a321SAlan Stern break; 6093ca2a321SAlan Stern case USB_ENDPOINT_XFER_ISOC: 6103ca2a321SAlan Stern --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs; 6113ca2a321SAlan Stern p = "ISO"; 6123ca2a321SAlan Stern break; 6133ca2a321SAlan Stern } 6143ca2a321SAlan Stern qh->bandwidth_reserved = 0; 6153ca2a321SAlan Stern dev_dbg(uhci_dev(uhci), 6163ca2a321SAlan Stern "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", 6173ca2a321SAlan Stern "release", qh->udev->devnum, 6183ca2a321SAlan Stern qh->hep->desc.bEndpointAddress, p, 6193ca2a321SAlan Stern qh->period, qh->phase, load); 6203ca2a321SAlan Stern } 6213ca2a321SAlan Stern 622dccf4a48SAlan Stern static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, 623dccf4a48SAlan Stern struct urb *urb) 6241da177e4SLinus Torvalds { 6251da177e4SLinus Torvalds struct urb_priv *urbp; 6261da177e4SLinus Torvalds 627*c3762229SRobert P. J. Day urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC); 6281da177e4SLinus Torvalds if (!urbp) 6291da177e4SLinus Torvalds return NULL; 6301da177e4SLinus Torvalds 6311da177e4SLinus Torvalds urbp->urb = urb; 6321da177e4SLinus Torvalds urb->hcpriv = urbp; 633dccf4a48SAlan Stern 634dccf4a48SAlan Stern INIT_LIST_HEAD(&urbp->node); 635dccf4a48SAlan Stern INIT_LIST_HEAD(&urbp->td_list); 6361da177e4SLinus Torvalds 6371da177e4SLinus Torvalds return urbp; 6381da177e4SLinus Torvalds } 6391da177e4SLinus Torvalds 640dccf4a48SAlan Stern static void uhci_free_urb_priv(struct uhci_hcd *uhci, 641dccf4a48SAlan Stern struct urb_priv *urbp) 6421da177e4SLinus Torvalds { 6431da177e4SLinus Torvalds struct uhci_td *td, *tmp; 6441da177e4SLinus Torvalds 645dccf4a48SAlan Stern if (!list_empty(&urbp->node)) 646dccf4a48SAlan Stern dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n", 647dccf4a48SAlan Stern urbp->urb); 6481da177e4SLinus Torvalds 6491da177e4SLinus Torvalds list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { 65004538a25SAlan Stern uhci_remove_td_from_urbp(td); 65104538a25SAlan Stern uhci_free_td(uhci, td); 6521da177e4SLinus Torvalds } 6531da177e4SLinus Torvalds 654dccf4a48SAlan Stern urbp->urb->hcpriv = NULL; 6551da177e4SLinus Torvalds kmem_cache_free(uhci_up_cachep, urbp); 6561da177e4SLinus Torvalds } 6571da177e4SLinus Torvalds 6581da177e4SLinus Torvalds /* 6591da177e4SLinus Torvalds * Map status to standard result codes 6601da177e4SLinus Torvalds * 6611da177e4SLinus Torvalds * <status> is (td_status(td) & 0xF60000), a.k.a. 6621da177e4SLinus Torvalds * uhci_status_bits(td_status(td)). 6631da177e4SLinus Torvalds * Note: <status> does not include the TD_CTRL_NAK bit. 6641da177e4SLinus Torvalds * <dir_out> is True for output TDs and False for input TDs. 6651da177e4SLinus Torvalds */ 6661da177e4SLinus Torvalds static int uhci_map_status(int status, int dir_out) 6671da177e4SLinus Torvalds { 6681da177e4SLinus Torvalds if (!status) 6691da177e4SLinus Torvalds return 0; 6701da177e4SLinus Torvalds if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */ 6711da177e4SLinus Torvalds return -EPROTO; 6721da177e4SLinus Torvalds if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */ 6731da177e4SLinus Torvalds if (dir_out) 6741da177e4SLinus Torvalds return -EPROTO; 6751da177e4SLinus Torvalds else 6761da177e4SLinus Torvalds return -EILSEQ; 6771da177e4SLinus Torvalds } 6781da177e4SLinus Torvalds if (status & TD_CTRL_BABBLE) /* Babble */ 6791da177e4SLinus Torvalds return -EOVERFLOW; 6801da177e4SLinus Torvalds if (status & TD_CTRL_DBUFERR) /* Buffer error */ 6811da177e4SLinus Torvalds return -ENOSR; 6821da177e4SLinus Torvalds if (status & TD_CTRL_STALLED) /* Stalled */ 6831da177e4SLinus Torvalds return -EPIPE; 6841da177e4SLinus Torvalds return 0; 6851da177e4SLinus Torvalds } 6861da177e4SLinus Torvalds 6871da177e4SLinus Torvalds /* 6881da177e4SLinus Torvalds * Control transfers 6891da177e4SLinus Torvalds */ 690dccf4a48SAlan Stern static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, 691dccf4a48SAlan Stern struct uhci_qh *qh) 6921da177e4SLinus Torvalds { 6931da177e4SLinus Torvalds struct uhci_td *td; 6941da177e4SLinus Torvalds unsigned long destination, status; 695dccf4a48SAlan Stern int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); 6961da177e4SLinus Torvalds int len = urb->transfer_buffer_length; 6971da177e4SLinus Torvalds dma_addr_t data = urb->transfer_dma; 698dccf4a48SAlan Stern __le32 *plink; 69904538a25SAlan Stern struct urb_priv *urbp = urb->hcpriv; 7001da177e4SLinus Torvalds 7011da177e4SLinus Torvalds /* The "pipe" thing contains the destination in bits 8--18 */ 7021da177e4SLinus Torvalds destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP; 7031da177e4SLinus Torvalds 704af0bb599SAlan Stern /* 3 errors, dummy TD remains inactive */ 705af0bb599SAlan Stern status = uhci_maxerr(3); 7061da177e4SLinus Torvalds if (urb->dev->speed == USB_SPEED_LOW) 7071da177e4SLinus Torvalds status |= TD_CTRL_LS; 7081da177e4SLinus Torvalds 7091da177e4SLinus Torvalds /* 7101da177e4SLinus Torvalds * Build the TD for the control request setup packet 7111da177e4SLinus Torvalds */ 712af0bb599SAlan Stern td = qh->dummy_td; 71304538a25SAlan Stern uhci_add_td_to_urbp(td, urbp); 714fa346568SAlan Stern uhci_fill_td(td, status, destination | uhci_explen(8), 7151da177e4SLinus Torvalds urb->setup_dma); 716dccf4a48SAlan Stern plink = &td->link; 717af0bb599SAlan Stern status |= TD_CTRL_ACTIVE; 7181da177e4SLinus Torvalds 7191da177e4SLinus Torvalds /* 7201da177e4SLinus Torvalds * If direction is "send", change the packet ID from SETUP (0x2D) 7211da177e4SLinus Torvalds * to OUT (0xE1). Else change it from SETUP to IN (0x69) and 7221da177e4SLinus Torvalds * set Short Packet Detect (SPD) for all data packets. 7231da177e4SLinus Torvalds */ 7241da177e4SLinus Torvalds if (usb_pipeout(urb->pipe)) 7251da177e4SLinus Torvalds destination ^= (USB_PID_SETUP ^ USB_PID_OUT); 7261da177e4SLinus Torvalds else { 7271da177e4SLinus Torvalds destination ^= (USB_PID_SETUP ^ USB_PID_IN); 7281da177e4SLinus Torvalds status |= TD_CTRL_SPD; 7291da177e4SLinus Torvalds } 7301da177e4SLinus Torvalds 7311da177e4SLinus Torvalds /* 732687f5f34SAlan Stern * Build the DATA TDs 7331da177e4SLinus Torvalds */ 7341da177e4SLinus Torvalds while (len > 0) { 735dccf4a48SAlan Stern int pktsze = min(len, maxsze); 7361da177e4SLinus Torvalds 7372532178aSAlan Stern td = uhci_alloc_td(uhci); 7381da177e4SLinus Torvalds if (!td) 739af0bb599SAlan Stern goto nomem; 740dccf4a48SAlan Stern *plink = cpu_to_le32(td->dma_handle); 7411da177e4SLinus Torvalds 7421da177e4SLinus Torvalds /* Alternate Data0/1 (start with Data1) */ 7431da177e4SLinus Torvalds destination ^= TD_TOKEN_TOGGLE; 7441da177e4SLinus Torvalds 74504538a25SAlan Stern uhci_add_td_to_urbp(td, urbp); 746fa346568SAlan Stern uhci_fill_td(td, status, destination | uhci_explen(pktsze), 7471da177e4SLinus Torvalds data); 748dccf4a48SAlan Stern plink = &td->link; 7491da177e4SLinus Torvalds 7501da177e4SLinus Torvalds data += pktsze; 7511da177e4SLinus Torvalds len -= pktsze; 7521da177e4SLinus Torvalds } 7531da177e4SLinus Torvalds 7541da177e4SLinus Torvalds /* 7551da177e4SLinus Torvalds * Build the final TD for control status 7561da177e4SLinus Torvalds */ 7572532178aSAlan Stern td = uhci_alloc_td(uhci); 7581da177e4SLinus Torvalds if (!td) 759af0bb599SAlan Stern goto nomem; 760dccf4a48SAlan Stern *plink = cpu_to_le32(td->dma_handle); 7611da177e4SLinus Torvalds 7621da177e4SLinus Torvalds /* 7631da177e4SLinus Torvalds * It's IN if the pipe is an output pipe or we're not expecting 7641da177e4SLinus Torvalds * data back. 7651da177e4SLinus Torvalds */ 7661da177e4SLinus Torvalds destination &= ~TD_TOKEN_PID_MASK; 7671da177e4SLinus Torvalds if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length) 7681da177e4SLinus Torvalds destination |= USB_PID_IN; 7691da177e4SLinus Torvalds else 7701da177e4SLinus Torvalds destination |= USB_PID_OUT; 7711da177e4SLinus Torvalds 7721da177e4SLinus Torvalds destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ 7731da177e4SLinus Torvalds 7741da177e4SLinus Torvalds status &= ~TD_CTRL_SPD; 7751da177e4SLinus Torvalds 77604538a25SAlan Stern uhci_add_td_to_urbp(td, urbp); 7771da177e4SLinus Torvalds uhci_fill_td(td, status | TD_CTRL_IOC, 778fa346568SAlan Stern destination | uhci_explen(0), 0); 779af0bb599SAlan Stern plink = &td->link; 780af0bb599SAlan Stern 781af0bb599SAlan Stern /* 782af0bb599SAlan Stern * Build the new dummy TD and activate the old one 783af0bb599SAlan Stern */ 784af0bb599SAlan Stern td = uhci_alloc_td(uhci); 785af0bb599SAlan Stern if (!td) 786af0bb599SAlan Stern goto nomem; 787af0bb599SAlan Stern *plink = cpu_to_le32(td->dma_handle); 788af0bb599SAlan Stern 789af0bb599SAlan Stern uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); 790af0bb599SAlan Stern wmb(); 791af0bb599SAlan Stern qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE); 792af0bb599SAlan Stern qh->dummy_td = td; 7931da177e4SLinus Torvalds 7941da177e4SLinus Torvalds /* Low-speed transfers get a different queue, and won't hog the bus. 7951da177e4SLinus Torvalds * Also, some devices enumerate better without FSBR; the easiest way 7961da177e4SLinus Torvalds * to do that is to put URBs on the low-speed queue while the device 797630aa3cfSAlan Stern * isn't in the CONFIGURED state. */ 7981da177e4SLinus Torvalds if (urb->dev->speed == USB_SPEED_LOW || 799630aa3cfSAlan Stern urb->dev->state != USB_STATE_CONFIGURED) 800dccf4a48SAlan Stern qh->skel = uhci->skel_ls_control_qh; 8011da177e4SLinus Torvalds else { 802dccf4a48SAlan Stern qh->skel = uhci->skel_fs_control_qh; 80384afddd7SAlan Stern uhci_add_fsbr(uhci, urb); 8041da177e4SLinus Torvalds } 80559e29ed9SAlan Stern 80659e29ed9SAlan Stern urb->actual_length = -8; /* Account for the SETUP packet */ 807dccf4a48SAlan Stern return 0; 808af0bb599SAlan Stern 809af0bb599SAlan Stern nomem: 810af0bb599SAlan Stern /* Remove the dummy TD from the td_list so it doesn't get freed */ 81104538a25SAlan Stern uhci_remove_td_from_urbp(qh->dummy_td); 812af0bb599SAlan Stern return -ENOMEM; 8131da177e4SLinus Torvalds } 8141da177e4SLinus Torvalds 8151da177e4SLinus Torvalds /* 8161da177e4SLinus Torvalds * Common submit for bulk and interrupt 8171da177e4SLinus Torvalds */ 818dccf4a48SAlan Stern static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, 819dccf4a48SAlan Stern struct uhci_qh *qh) 8201da177e4SLinus Torvalds { 8211da177e4SLinus Torvalds struct uhci_td *td; 8221da177e4SLinus Torvalds unsigned long destination, status; 823dccf4a48SAlan Stern int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); 8241da177e4SLinus Torvalds int len = urb->transfer_buffer_length; 8251da177e4SLinus Torvalds dma_addr_t data = urb->transfer_dma; 826af0bb599SAlan Stern __le32 *plink; 82704538a25SAlan Stern struct urb_priv *urbp = urb->hcpriv; 828af0bb599SAlan Stern unsigned int toggle; 8291da177e4SLinus Torvalds 8301da177e4SLinus Torvalds if (len < 0) 8311da177e4SLinus Torvalds return -EINVAL; 8321da177e4SLinus Torvalds 8331da177e4SLinus Torvalds /* The "pipe" thing contains the destination in bits 8--18 */ 8341da177e4SLinus Torvalds destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); 835af0bb599SAlan Stern toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), 836af0bb599SAlan Stern usb_pipeout(urb->pipe)); 8371da177e4SLinus Torvalds 838af0bb599SAlan Stern /* 3 errors, dummy TD remains inactive */ 839af0bb599SAlan Stern status = uhci_maxerr(3); 8401da177e4SLinus Torvalds if (urb->dev->speed == USB_SPEED_LOW) 8411da177e4SLinus Torvalds status |= TD_CTRL_LS; 8421da177e4SLinus Torvalds if (usb_pipein(urb->pipe)) 8431da177e4SLinus Torvalds status |= TD_CTRL_SPD; 8441da177e4SLinus Torvalds 8451da177e4SLinus Torvalds /* 846687f5f34SAlan Stern * Build the DATA TDs 8471da177e4SLinus Torvalds */ 848af0bb599SAlan Stern plink = NULL; 849af0bb599SAlan Stern td = qh->dummy_td; 8501da177e4SLinus Torvalds do { /* Allow zero length packets */ 8511da177e4SLinus Torvalds int pktsze = maxsze; 8521da177e4SLinus Torvalds 853dccf4a48SAlan Stern if (len <= pktsze) { /* The last packet */ 8541da177e4SLinus Torvalds pktsze = len; 8551da177e4SLinus Torvalds if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) 8561da177e4SLinus Torvalds status &= ~TD_CTRL_SPD; 8571da177e4SLinus Torvalds } 8581da177e4SLinus Torvalds 859af0bb599SAlan Stern if (plink) { 8602532178aSAlan Stern td = uhci_alloc_td(uhci); 8611da177e4SLinus Torvalds if (!td) 862af0bb599SAlan Stern goto nomem; 863dccf4a48SAlan Stern *plink = cpu_to_le32(td->dma_handle); 864af0bb599SAlan Stern } 86504538a25SAlan Stern uhci_add_td_to_urbp(td, urbp); 866dccf4a48SAlan Stern uhci_fill_td(td, status, 867dccf4a48SAlan Stern destination | uhci_explen(pktsze) | 868af0bb599SAlan Stern (toggle << TD_TOKEN_TOGGLE_SHIFT), 8691da177e4SLinus Torvalds data); 870dccf4a48SAlan Stern plink = &td->link; 871af0bb599SAlan Stern status |= TD_CTRL_ACTIVE; 8721da177e4SLinus Torvalds 8731da177e4SLinus Torvalds data += pktsze; 8741da177e4SLinus Torvalds len -= maxsze; 875af0bb599SAlan Stern toggle ^= 1; 8761da177e4SLinus Torvalds } while (len > 0); 8771da177e4SLinus Torvalds 8781da177e4SLinus Torvalds /* 8791da177e4SLinus Torvalds * URB_ZERO_PACKET means adding a 0-length packet, if direction 8801da177e4SLinus Torvalds * is OUT and the transfer_length was an exact multiple of maxsze, 8811da177e4SLinus Torvalds * hence (len = transfer_length - N * maxsze) == 0 8821da177e4SLinus Torvalds * however, if transfer_length == 0, the zero packet was already 8831da177e4SLinus Torvalds * prepared above. 8841da177e4SLinus Torvalds */ 885dccf4a48SAlan Stern if ((urb->transfer_flags & URB_ZERO_PACKET) && 886dccf4a48SAlan Stern usb_pipeout(urb->pipe) && len == 0 && 887dccf4a48SAlan Stern urb->transfer_buffer_length > 0) { 8882532178aSAlan Stern td = uhci_alloc_td(uhci); 8891da177e4SLinus Torvalds if (!td) 890af0bb599SAlan Stern goto nomem; 891dccf4a48SAlan Stern *plink = cpu_to_le32(td->dma_handle); 8921da177e4SLinus Torvalds 89304538a25SAlan Stern uhci_add_td_to_urbp(td, urbp); 894af0bb599SAlan Stern uhci_fill_td(td, status, 895af0bb599SAlan Stern destination | uhci_explen(0) | 896af0bb599SAlan Stern (toggle << TD_TOKEN_TOGGLE_SHIFT), 8971da177e4SLinus Torvalds data); 898af0bb599SAlan Stern plink = &td->link; 8991da177e4SLinus Torvalds 900af0bb599SAlan Stern toggle ^= 1; 9011da177e4SLinus Torvalds } 9021da177e4SLinus Torvalds 9031da177e4SLinus Torvalds /* Set the interrupt-on-completion flag on the last packet. 9041da177e4SLinus Torvalds * A more-or-less typical 4 KB URB (= size of one memory page) 9051da177e4SLinus Torvalds * will require about 3 ms to transfer; that's a little on the 9061da177e4SLinus Torvalds * fast side but not enough to justify delaying an interrupt 9071da177e4SLinus Torvalds * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT 9081da177e4SLinus Torvalds * flag setting. */ 909dccf4a48SAlan Stern td->status |= __constant_cpu_to_le32(TD_CTRL_IOC); 9101da177e4SLinus Torvalds 911af0bb599SAlan Stern /* 912af0bb599SAlan Stern * Build the new dummy TD and activate the old one 913af0bb599SAlan Stern */ 914af0bb599SAlan Stern td = uhci_alloc_td(uhci); 915af0bb599SAlan Stern if (!td) 916af0bb599SAlan Stern goto nomem; 917af0bb599SAlan Stern *plink = cpu_to_le32(td->dma_handle); 918af0bb599SAlan Stern 919af0bb599SAlan Stern uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); 920af0bb599SAlan Stern wmb(); 921af0bb599SAlan Stern qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE); 922af0bb599SAlan Stern qh->dummy_td = td; 923af0bb599SAlan Stern 924af0bb599SAlan Stern usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), 925af0bb599SAlan Stern usb_pipeout(urb->pipe), toggle); 926dccf4a48SAlan Stern return 0; 927af0bb599SAlan Stern 928af0bb599SAlan Stern nomem: 929af0bb599SAlan Stern /* Remove the dummy TD from the td_list so it doesn't get freed */ 93004538a25SAlan Stern uhci_remove_td_from_urbp(qh->dummy_td); 931af0bb599SAlan Stern return -ENOMEM; 9321da177e4SLinus Torvalds } 9331da177e4SLinus Torvalds 934dccf4a48SAlan Stern static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, 935dccf4a48SAlan Stern struct uhci_qh *qh) 9361da177e4SLinus Torvalds { 9371da177e4SLinus Torvalds int ret; 9381da177e4SLinus Torvalds 9391da177e4SLinus Torvalds /* Can't have low-speed bulk transfers */ 9401da177e4SLinus Torvalds if (urb->dev->speed == USB_SPEED_LOW) 9411da177e4SLinus Torvalds return -EINVAL; 9421da177e4SLinus Torvalds 943dccf4a48SAlan Stern qh->skel = uhci->skel_bulk_qh; 944dccf4a48SAlan Stern ret = uhci_submit_common(uhci, urb, qh); 945dccf4a48SAlan Stern if (ret == 0) 94684afddd7SAlan Stern uhci_add_fsbr(uhci, urb); 9471da177e4SLinus Torvalds return ret; 9481da177e4SLinus Torvalds } 9491da177e4SLinus Torvalds 950caf3827aSAlan Stern static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, 951dccf4a48SAlan Stern struct uhci_qh *qh) 9521da177e4SLinus Torvalds { 9533ca2a321SAlan Stern int ret; 954caf3827aSAlan Stern 955dccf4a48SAlan Stern /* USB 1.1 interrupt transfers only involve one packet per interval. 956dccf4a48SAlan Stern * Drivers can submit URBs of any length, but longer ones will need 957dccf4a48SAlan Stern * multiple intervals to complete. 9581da177e4SLinus Torvalds */ 959caf3827aSAlan Stern 9603ca2a321SAlan Stern if (!qh->bandwidth_reserved) { 9613ca2a321SAlan Stern int exponent; 9623ca2a321SAlan Stern 963caf3827aSAlan Stern /* Figure out which power-of-two queue to use */ 964caf3827aSAlan Stern for (exponent = 7; exponent >= 0; --exponent) { 965caf3827aSAlan Stern if ((1 << exponent) <= urb->interval) 966caf3827aSAlan Stern break; 967caf3827aSAlan Stern } 968caf3827aSAlan Stern if (exponent < 0) 969caf3827aSAlan Stern return -EINVAL; 9703ca2a321SAlan Stern qh->period = 1 << exponent; 971caf3827aSAlan Stern qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)]; 972caf3827aSAlan Stern 9733ca2a321SAlan Stern /* For now, interrupt phase is fixed by the layout 9743ca2a321SAlan Stern * of the QH lists. */ 9753ca2a321SAlan Stern qh->phase = (qh->period / 2) & (MAX_PHASE - 1); 9763ca2a321SAlan Stern ret = uhci_check_bandwidth(uhci, qh); 9773ca2a321SAlan Stern if (ret) 9783ca2a321SAlan Stern return ret; 9793ca2a321SAlan Stern } else if (qh->period > urb->interval) 9803ca2a321SAlan Stern return -EINVAL; /* Can't decrease the period */ 9813ca2a321SAlan Stern 9823ca2a321SAlan Stern ret = uhci_submit_common(uhci, urb, qh); 9833ca2a321SAlan Stern if (ret == 0) { 9843ca2a321SAlan Stern urb->interval = qh->period; 9853ca2a321SAlan Stern if (!qh->bandwidth_reserved) 9863ca2a321SAlan Stern uhci_reserve_bandwidth(uhci, qh); 9873ca2a321SAlan Stern } 9883ca2a321SAlan Stern return ret; 9891da177e4SLinus Torvalds } 9901da177e4SLinus Torvalds 9911da177e4SLinus Torvalds /* 992b1869000SAlan Stern * Fix up the data structures following a short transfer 993b1869000SAlan Stern */ 994b1869000SAlan Stern static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, 99559e29ed9SAlan Stern struct uhci_qh *qh, struct urb_priv *urbp) 996b1869000SAlan Stern { 997b1869000SAlan Stern struct uhci_td *td; 99859e29ed9SAlan Stern struct list_head *tmp; 99959e29ed9SAlan Stern int ret; 1000b1869000SAlan Stern 1001b1869000SAlan Stern td = list_entry(urbp->td_list.prev, struct uhci_td, list); 1002b1869000SAlan Stern if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1003b1869000SAlan Stern 1004b1869000SAlan Stern /* When a control transfer is short, we have to restart 1005b1869000SAlan Stern * the queue at the status stage transaction, which is 1006b1869000SAlan Stern * the last TD. */ 100759e29ed9SAlan Stern WARN_ON(list_empty(&urbp->td_list)); 1008b1869000SAlan Stern qh->element = cpu_to_le32(td->dma_handle); 100959e29ed9SAlan Stern tmp = td->list.prev; 1010b1869000SAlan Stern ret = -EINPROGRESS; 1011b1869000SAlan Stern 101259e29ed9SAlan Stern } else { 1013b1869000SAlan Stern 1014b1869000SAlan Stern /* When a bulk/interrupt transfer is short, we have to 1015b1869000SAlan Stern * fix up the toggles of the following URBs on the queue 1016b1869000SAlan Stern * before restarting the queue at the next URB. */ 101759e29ed9SAlan Stern qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1; 1018b1869000SAlan Stern uhci_fixup_toggles(qh, 1); 1019b1869000SAlan Stern 102059e29ed9SAlan Stern if (list_empty(&urbp->td_list)) 102159e29ed9SAlan Stern td = qh->post_td; 1022b1869000SAlan Stern qh->element = td->link; 102359e29ed9SAlan Stern tmp = urbp->td_list.prev; 102459e29ed9SAlan Stern ret = 0; 1025b1869000SAlan Stern } 1026b1869000SAlan Stern 102759e29ed9SAlan Stern /* Remove all the TDs we skipped over, from tmp back to the start */ 102859e29ed9SAlan Stern while (tmp != &urbp->td_list) { 102959e29ed9SAlan Stern td = list_entry(tmp, struct uhci_td, list); 103059e29ed9SAlan Stern tmp = tmp->prev; 103159e29ed9SAlan Stern 103204538a25SAlan Stern uhci_remove_td_from_urbp(td); 103304538a25SAlan Stern uhci_free_td(uhci, td); 103459e29ed9SAlan Stern } 1035b1869000SAlan Stern return ret; 1036b1869000SAlan Stern } 1037b1869000SAlan Stern 1038b1869000SAlan Stern /* 1039b1869000SAlan Stern * Common result for control, bulk, and interrupt 1040b1869000SAlan Stern */ 1041b1869000SAlan Stern static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) 1042b1869000SAlan Stern { 1043b1869000SAlan Stern struct urb_priv *urbp = urb->hcpriv; 1044b1869000SAlan Stern struct uhci_qh *qh = urbp->qh; 104559e29ed9SAlan Stern struct uhci_td *td, *tmp; 1046b1869000SAlan Stern unsigned status; 1047b1869000SAlan Stern int ret = 0; 1048b1869000SAlan Stern 104959e29ed9SAlan Stern list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { 1050b1869000SAlan Stern unsigned int ctrlstat; 1051b1869000SAlan Stern int len; 1052b1869000SAlan Stern 1053b1869000SAlan Stern ctrlstat = td_status(td); 1054b1869000SAlan Stern status = uhci_status_bits(ctrlstat); 1055b1869000SAlan Stern if (status & TD_CTRL_ACTIVE) 1056b1869000SAlan Stern return -EINPROGRESS; 1057b1869000SAlan Stern 1058b1869000SAlan Stern len = uhci_actual_length(ctrlstat); 1059b1869000SAlan Stern urb->actual_length += len; 1060b1869000SAlan Stern 1061b1869000SAlan Stern if (status) { 1062b1869000SAlan Stern ret = uhci_map_status(status, 1063b1869000SAlan Stern uhci_packetout(td_token(td))); 1064b1869000SAlan Stern if ((debug == 1 && ret != -EPIPE) || debug > 1) { 1065b1869000SAlan Stern /* Some debugging code */ 1066be3cbc5fSDavid Brownell dev_dbg(&urb->dev->dev, 1067b1869000SAlan Stern "%s: failed with status %x\n", 1068b1869000SAlan Stern __FUNCTION__, status); 1069b1869000SAlan Stern 1070b1869000SAlan Stern if (debug > 1 && errbuf) { 1071b1869000SAlan Stern /* Print the chain for debugging */ 1072b1869000SAlan Stern uhci_show_qh(urbp->qh, errbuf, 1073b1869000SAlan Stern ERRBUF_LEN, 0); 1074b1869000SAlan Stern lprintk(errbuf); 1075b1869000SAlan Stern } 1076b1869000SAlan Stern } 1077b1869000SAlan Stern 1078b1869000SAlan Stern } else if (len < uhci_expected_length(td_token(td))) { 1079b1869000SAlan Stern 1080b1869000SAlan Stern /* We received a short packet */ 1081b1869000SAlan Stern if (urb->transfer_flags & URB_SHORT_NOT_OK) 1082b1869000SAlan Stern ret = -EREMOTEIO; 1083f443ddf1SAlan Stern 1084f443ddf1SAlan Stern /* Fixup needed only if this isn't the URB's last TD */ 1085f443ddf1SAlan Stern else if (&td->list != urbp->td_list.prev) 1086b1869000SAlan Stern ret = 1; 1087b1869000SAlan Stern } 1088b1869000SAlan Stern 108904538a25SAlan Stern uhci_remove_td_from_urbp(td); 109059e29ed9SAlan Stern if (qh->post_td) 109104538a25SAlan Stern uhci_free_td(uhci, qh->post_td); 109259e29ed9SAlan Stern qh->post_td = td; 109359e29ed9SAlan Stern 1094b1869000SAlan Stern if (ret != 0) 1095b1869000SAlan Stern goto err; 1096b1869000SAlan Stern } 1097b1869000SAlan Stern return ret; 1098b1869000SAlan Stern 1099b1869000SAlan Stern err: 1100b1869000SAlan Stern if (ret < 0) { 1101b1869000SAlan Stern /* In case a control transfer gets an error 1102b1869000SAlan Stern * during the setup stage */ 1103b1869000SAlan Stern urb->actual_length = max(urb->actual_length, 0); 1104b1869000SAlan Stern 1105b1869000SAlan Stern /* Note that the queue has stopped and save 1106b1869000SAlan Stern * the next toggle value */ 1107b1869000SAlan Stern qh->element = UHCI_PTR_TERM; 1108b1869000SAlan Stern qh->is_stopped = 1; 1109b1869000SAlan Stern qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL); 1110b1869000SAlan Stern qh->initial_toggle = uhci_toggle(td_token(td)) ^ 1111b1869000SAlan Stern (ret == -EREMOTEIO); 1112b1869000SAlan Stern 1113b1869000SAlan Stern } else /* Short packet received */ 111459e29ed9SAlan Stern ret = uhci_fixup_short_transfer(uhci, qh, urbp); 1115b1869000SAlan Stern return ret; 1116b1869000SAlan Stern } 1117b1869000SAlan Stern 1118b1869000SAlan Stern /* 11191da177e4SLinus Torvalds * Isochronous transfers 11201da177e4SLinus Torvalds */ 1121dccf4a48SAlan Stern static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, 1122dccf4a48SAlan Stern struct uhci_qh *qh) 11231da177e4SLinus Torvalds { 1124dccf4a48SAlan Stern struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */ 11250ed8fee1SAlan Stern int i, frame; 1126dccf4a48SAlan Stern unsigned long destination, status; 1127b81d3436SAlan Stern struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; 11281da177e4SLinus Torvalds 1129caf3827aSAlan Stern /* Values must not be too big (could overflow below) */ 1130caf3827aSAlan Stern if (urb->interval >= UHCI_NUMFRAMES || 1131caf3827aSAlan Stern urb->number_of_packets >= UHCI_NUMFRAMES) 1132caf3827aSAlan Stern return -EFBIG; 1133caf3827aSAlan Stern 1134caf3827aSAlan Stern /* Check the period and figure out the starting frame number */ 11353ca2a321SAlan Stern if (!qh->bandwidth_reserved) { 11363ca2a321SAlan Stern qh->period = urb->interval; 1137caf3827aSAlan Stern if (urb->transfer_flags & URB_ISO_ASAP) { 11383ca2a321SAlan Stern qh->phase = -1; /* Find the best phase */ 11393ca2a321SAlan Stern i = uhci_check_bandwidth(uhci, qh); 11403ca2a321SAlan Stern if (i) 11413ca2a321SAlan Stern return i; 11423ca2a321SAlan Stern 11433ca2a321SAlan Stern /* Allow a little time to allocate the TDs */ 1144c8155cc5SAlan Stern uhci_get_current_frame_number(uhci); 11453ca2a321SAlan Stern frame = uhci->frame_number + 10; 11463ca2a321SAlan Stern 11473ca2a321SAlan Stern /* Move forward to the first frame having the 11483ca2a321SAlan Stern * correct phase */ 11493ca2a321SAlan Stern urb->start_frame = frame + ((qh->phase - frame) & 11503ca2a321SAlan Stern (qh->period - 1)); 1151caf3827aSAlan Stern } else { 1152c8155cc5SAlan Stern i = urb->start_frame - uhci->last_iso_frame; 1153caf3827aSAlan Stern if (i <= 0 || i >= UHCI_NUMFRAMES) 1154caf3827aSAlan Stern return -EINVAL; 11553ca2a321SAlan Stern qh->phase = urb->start_frame & (qh->period - 1); 11563ca2a321SAlan Stern i = uhci_check_bandwidth(uhci, qh); 11573ca2a321SAlan Stern if (i) 11583ca2a321SAlan Stern return i; 1159caf3827aSAlan Stern } 11603ca2a321SAlan Stern 1161caf3827aSAlan Stern } else if (qh->period != urb->interval) { 1162caf3827aSAlan Stern return -EINVAL; /* Can't change the period */ 1163caf3827aSAlan Stern 1164caf3827aSAlan Stern } else { /* Pick up where the last URB leaves off */ 1165caf3827aSAlan Stern if (list_empty(&qh->queue)) { 1166c8155cc5SAlan Stern frame = qh->iso_frame; 1167caf3827aSAlan Stern } else { 1168caf3827aSAlan Stern struct urb *lurb; 1169caf3827aSAlan Stern 1170caf3827aSAlan Stern lurb = list_entry(qh->queue.prev, 1171caf3827aSAlan Stern struct urb_priv, node)->urb; 1172caf3827aSAlan Stern frame = lurb->start_frame + 1173caf3827aSAlan Stern lurb->number_of_packets * 1174caf3827aSAlan Stern lurb->interval; 1175caf3827aSAlan Stern } 1176caf3827aSAlan Stern if (urb->transfer_flags & URB_ISO_ASAP) 1177caf3827aSAlan Stern urb->start_frame = frame; 1178c8155cc5SAlan Stern else if (urb->start_frame != frame) 1179c8155cc5SAlan Stern return -EINVAL; 1180caf3827aSAlan Stern } 1181caf3827aSAlan Stern 1182caf3827aSAlan Stern /* Make sure we won't have to go too far into the future */ 1183c8155cc5SAlan Stern if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES, 1184caf3827aSAlan Stern urb->start_frame + urb->number_of_packets * 1185caf3827aSAlan Stern urb->interval)) 11860ed8fee1SAlan Stern return -EFBIG; 11870ed8fee1SAlan Stern 11881da177e4SLinus Torvalds status = TD_CTRL_ACTIVE | TD_CTRL_IOS; 11891da177e4SLinus Torvalds destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); 11901da177e4SLinus Torvalds 1191b81d3436SAlan Stern for (i = 0; i < urb->number_of_packets; i++) { 11922532178aSAlan Stern td = uhci_alloc_td(uhci); 11931da177e4SLinus Torvalds if (!td) 11941da177e4SLinus Torvalds return -ENOMEM; 11951da177e4SLinus Torvalds 119604538a25SAlan Stern uhci_add_td_to_urbp(td, urbp); 1197dccf4a48SAlan Stern uhci_fill_td(td, status, destination | 1198dccf4a48SAlan Stern uhci_explen(urb->iso_frame_desc[i].length), 1199dccf4a48SAlan Stern urb->transfer_dma + 1200dccf4a48SAlan Stern urb->iso_frame_desc[i].offset); 1201b81d3436SAlan Stern } 12021da177e4SLinus Torvalds 1203dccf4a48SAlan Stern /* Set the interrupt-on-completion flag on the last packet. */ 1204dccf4a48SAlan Stern td->status |= __constant_cpu_to_le32(TD_CTRL_IOC); 1205dccf4a48SAlan Stern 1206dccf4a48SAlan Stern /* Add the TDs to the frame list */ 1207b81d3436SAlan Stern frame = urb->start_frame; 1208b81d3436SAlan Stern list_for_each_entry(td, &urbp->td_list, list) { 1209dccf4a48SAlan Stern uhci_insert_td_in_frame_list(uhci, td, frame); 1210c8155cc5SAlan Stern frame += qh->period; 1211c8155cc5SAlan Stern } 1212c8155cc5SAlan Stern 1213c8155cc5SAlan Stern if (list_empty(&qh->queue)) { 1214c8155cc5SAlan Stern qh->iso_packet_desc = &urb->iso_frame_desc[0]; 1215c8155cc5SAlan Stern qh->iso_frame = urb->start_frame; 1216c8155cc5SAlan Stern qh->iso_status = 0; 12171da177e4SLinus Torvalds } 12181da177e4SLinus Torvalds 12193ca2a321SAlan Stern qh->skel = uhci->skel_iso_qh; 12203ca2a321SAlan Stern if (!qh->bandwidth_reserved) 12213ca2a321SAlan Stern uhci_reserve_bandwidth(uhci, qh); 1222dccf4a48SAlan Stern return 0; 12231da177e4SLinus Torvalds } 12241da177e4SLinus Torvalds 12251da177e4SLinus Torvalds static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) 12261da177e4SLinus Torvalds { 1227c8155cc5SAlan Stern struct uhci_td *td, *tmp; 1228c8155cc5SAlan Stern struct urb_priv *urbp = urb->hcpriv; 1229c8155cc5SAlan Stern struct uhci_qh *qh = urbp->qh; 1230c8155cc5SAlan Stern 1231c8155cc5SAlan Stern list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { 1232c8155cc5SAlan Stern unsigned int ctrlstat; 12331da177e4SLinus Torvalds int status; 12341da177e4SLinus Torvalds int actlength; 12351da177e4SLinus Torvalds 1236c8155cc5SAlan Stern if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame)) 12371da177e4SLinus Torvalds return -EINPROGRESS; 12381da177e4SLinus Torvalds 1239c8155cc5SAlan Stern uhci_remove_tds_from_frame(uhci, qh->iso_frame); 12401da177e4SLinus Torvalds 1241c8155cc5SAlan Stern ctrlstat = td_status(td); 1242c8155cc5SAlan Stern if (ctrlstat & TD_CTRL_ACTIVE) { 1243c8155cc5SAlan Stern status = -EXDEV; /* TD was added too late? */ 1244c8155cc5SAlan Stern } else { 12451da177e4SLinus Torvalds status = uhci_map_status(uhci_status_bits(ctrlstat), 12461da177e4SLinus Torvalds usb_pipeout(urb->pipe)); 1247c8155cc5SAlan Stern actlength = uhci_actual_length(ctrlstat); 1248c8155cc5SAlan Stern 1249c8155cc5SAlan Stern urb->actual_length += actlength; 1250c8155cc5SAlan Stern qh->iso_packet_desc->actual_length = actlength; 1251c8155cc5SAlan Stern qh->iso_packet_desc->status = status; 1252c8155cc5SAlan Stern } 1253c8155cc5SAlan Stern 12541da177e4SLinus Torvalds if (status) { 12551da177e4SLinus Torvalds urb->error_count++; 1256c8155cc5SAlan Stern qh->iso_status = status; 12571da177e4SLinus Torvalds } 12581da177e4SLinus Torvalds 1259c8155cc5SAlan Stern uhci_remove_td_from_urbp(td); 1260c8155cc5SAlan Stern uhci_free_td(uhci, td); 1261c8155cc5SAlan Stern qh->iso_frame += qh->period; 1262c8155cc5SAlan Stern ++qh->iso_packet_desc; 12631da177e4SLinus Torvalds } 1264c8155cc5SAlan Stern return qh->iso_status; 12651da177e4SLinus Torvalds } 12661da177e4SLinus Torvalds 12671da177e4SLinus Torvalds static int uhci_urb_enqueue(struct usb_hcd *hcd, 1268dccf4a48SAlan Stern struct usb_host_endpoint *hep, 126955016f10SAl Viro struct urb *urb, gfp_t mem_flags) 12701da177e4SLinus Torvalds { 12711da177e4SLinus Torvalds int ret; 12721da177e4SLinus Torvalds struct uhci_hcd *uhci = hcd_to_uhci(hcd); 12731da177e4SLinus Torvalds unsigned long flags; 1274dccf4a48SAlan Stern struct urb_priv *urbp; 1275dccf4a48SAlan Stern struct uhci_qh *qh; 12761da177e4SLinus Torvalds 12771da177e4SLinus Torvalds spin_lock_irqsave(&uhci->lock, flags); 12781da177e4SLinus Torvalds 12791da177e4SLinus Torvalds ret = urb->status; 12801da177e4SLinus Torvalds if (ret != -EINPROGRESS) /* URB already unlinked! */ 1281dccf4a48SAlan Stern goto done; 12821da177e4SLinus Torvalds 12831da177e4SLinus Torvalds ret = -ENOMEM; 1284dccf4a48SAlan Stern urbp = uhci_alloc_urb_priv(uhci, urb); 1285dccf4a48SAlan Stern if (!urbp) 1286dccf4a48SAlan Stern goto done; 1287dccf4a48SAlan Stern 1288dccf4a48SAlan Stern if (hep->hcpriv) 1289dccf4a48SAlan Stern qh = (struct uhci_qh *) hep->hcpriv; 1290dccf4a48SAlan Stern else { 1291dccf4a48SAlan Stern qh = uhci_alloc_qh(uhci, urb->dev, hep); 1292dccf4a48SAlan Stern if (!qh) 1293dccf4a48SAlan Stern goto err_no_qh; 12941da177e4SLinus Torvalds } 1295dccf4a48SAlan Stern urbp->qh = qh; 12961da177e4SLinus Torvalds 12974de7d2c2SAlan Stern switch (qh->type) { 12984de7d2c2SAlan Stern case USB_ENDPOINT_XFER_CONTROL: 1299dccf4a48SAlan Stern ret = uhci_submit_control(uhci, urb, qh); 1300dccf4a48SAlan Stern break; 13014de7d2c2SAlan Stern case USB_ENDPOINT_XFER_BULK: 1302dccf4a48SAlan Stern ret = uhci_submit_bulk(uhci, urb, qh); 13031da177e4SLinus Torvalds break; 13044de7d2c2SAlan Stern case USB_ENDPOINT_XFER_INT: 1305dccf4a48SAlan Stern ret = uhci_submit_interrupt(uhci, urb, qh); 13061da177e4SLinus Torvalds break; 13074de7d2c2SAlan Stern case USB_ENDPOINT_XFER_ISOC: 1308c8155cc5SAlan Stern urb->error_count = 0; 1309dccf4a48SAlan Stern ret = uhci_submit_isochronous(uhci, urb, qh); 13101da177e4SLinus Torvalds break; 13111da177e4SLinus Torvalds } 1312dccf4a48SAlan Stern if (ret != 0) 1313dccf4a48SAlan Stern goto err_submit_failed; 13141da177e4SLinus Torvalds 1315dccf4a48SAlan Stern /* Add this URB to the QH */ 1316dccf4a48SAlan Stern urbp->qh = qh; 1317dccf4a48SAlan Stern list_add_tail(&urbp->node, &qh->queue); 13181da177e4SLinus Torvalds 1319dccf4a48SAlan Stern /* If the new URB is the first and only one on this QH then either 1320dccf4a48SAlan Stern * the QH is new and idle or else it's unlinked and waiting to 13212775562aSAlan Stern * become idle, so we can activate it right away. But only if the 13222775562aSAlan Stern * queue isn't stopped. */ 132384afddd7SAlan Stern if (qh->queue.next == &urbp->node && !qh->is_stopped) { 1324dccf4a48SAlan Stern uhci_activate_qh(uhci, qh); 1325c5e3b741SAlan Stern uhci_urbp_wants_fsbr(uhci, urbp); 132684afddd7SAlan Stern } 1327dccf4a48SAlan Stern goto done; 1328dccf4a48SAlan Stern 1329dccf4a48SAlan Stern err_submit_failed: 1330dccf4a48SAlan Stern if (qh->state == QH_STATE_IDLE) 1331dccf4a48SAlan Stern uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */ 1332dccf4a48SAlan Stern 1333dccf4a48SAlan Stern err_no_qh: 1334dccf4a48SAlan Stern uhci_free_urb_priv(uhci, urbp); 1335dccf4a48SAlan Stern 1336dccf4a48SAlan Stern done: 13371da177e4SLinus Torvalds spin_unlock_irqrestore(&uhci->lock, flags); 13381da177e4SLinus Torvalds return ret; 13391da177e4SLinus Torvalds } 13401da177e4SLinus Torvalds 13411da177e4SLinus Torvalds static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) 13421da177e4SLinus Torvalds { 13431da177e4SLinus Torvalds struct uhci_hcd *uhci = hcd_to_uhci(hcd); 13441da177e4SLinus Torvalds unsigned long flags; 13451da177e4SLinus Torvalds struct urb_priv *urbp; 134610b8e47dSAlan Stern struct uhci_qh *qh; 13471da177e4SLinus Torvalds 13481da177e4SLinus Torvalds spin_lock_irqsave(&uhci->lock, flags); 13491da177e4SLinus Torvalds urbp = urb->hcpriv; 13501da177e4SLinus Torvalds if (!urbp) /* URB was never linked! */ 13511da177e4SLinus Torvalds goto done; 135210b8e47dSAlan Stern qh = urbp->qh; 13531da177e4SLinus Torvalds 1354dccf4a48SAlan Stern /* Remove Isochronous TDs from the frame list ASAP */ 135510b8e47dSAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC) { 1356dccf4a48SAlan Stern uhci_unlink_isochronous_tds(uhci, urb); 135710b8e47dSAlan Stern mb(); 135810b8e47dSAlan Stern 135910b8e47dSAlan Stern /* If the URB has already started, update the QH unlink time */ 136010b8e47dSAlan Stern uhci_get_current_frame_number(uhci); 136110b8e47dSAlan Stern if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number)) 136210b8e47dSAlan Stern qh->unlink_frame = uhci->frame_number; 136310b8e47dSAlan Stern } 136410b8e47dSAlan Stern 136510b8e47dSAlan Stern uhci_unlink_qh(uhci, qh); 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds done: 13681da177e4SLinus Torvalds spin_unlock_irqrestore(&uhci->lock, flags); 13691da177e4SLinus Torvalds return 0; 13701da177e4SLinus Torvalds } 13711da177e4SLinus Torvalds 13720ed8fee1SAlan Stern /* 13730ed8fee1SAlan Stern * Finish unlinking an URB and give it back 13740ed8fee1SAlan Stern */ 13750ed8fee1SAlan Stern static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh, 13767d12e780SDavid Howells struct urb *urb) 13770ed8fee1SAlan Stern __releases(uhci->lock) 13780ed8fee1SAlan Stern __acquires(uhci->lock) 13791da177e4SLinus Torvalds { 13801da177e4SLinus Torvalds struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; 13811da177e4SLinus Torvalds 1382c8155cc5SAlan Stern /* When giving back the first URB in an Isochronous queue, 1383c8155cc5SAlan Stern * reinitialize the QH's iso-related members for the next URB. */ 1384c8155cc5SAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC && 1385c8155cc5SAlan Stern urbp->node.prev == &qh->queue && 1386c8155cc5SAlan Stern urbp->node.next != &qh->queue) { 1387c8155cc5SAlan Stern struct urb *nurb = list_entry(urbp->node.next, 1388c8155cc5SAlan Stern struct urb_priv, node)->urb; 1389c8155cc5SAlan Stern 1390c8155cc5SAlan Stern qh->iso_packet_desc = &nurb->iso_frame_desc[0]; 1391c8155cc5SAlan Stern qh->iso_frame = nurb->start_frame; 1392c8155cc5SAlan Stern qh->iso_status = 0; 1393c8155cc5SAlan Stern } 13941da177e4SLinus Torvalds 13950ed8fee1SAlan Stern /* Take the URB off the QH's queue. If the queue is now empty, 13960ed8fee1SAlan Stern * this is a perfect time for a toggle fixup. */ 13970ed8fee1SAlan Stern list_del_init(&urbp->node); 13980ed8fee1SAlan Stern if (list_empty(&qh->queue) && qh->needs_fixup) { 13990ed8fee1SAlan Stern usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), 14000ed8fee1SAlan Stern usb_pipeout(urb->pipe), qh->initial_toggle); 14010ed8fee1SAlan Stern qh->needs_fixup = 0; 14020ed8fee1SAlan Stern } 14030ed8fee1SAlan Stern 14040ed8fee1SAlan Stern uhci_free_urb_priv(uhci, urbp); 14050ed8fee1SAlan Stern 14060ed8fee1SAlan Stern spin_unlock(&uhci->lock); 14077d12e780SDavid Howells usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb); 14080ed8fee1SAlan Stern spin_lock(&uhci->lock); 14090ed8fee1SAlan Stern 14100ed8fee1SAlan Stern /* If the queue is now empty, we can unlink the QH and give up its 14110ed8fee1SAlan Stern * reserved bandwidth. */ 14120ed8fee1SAlan Stern if (list_empty(&qh->queue)) { 14130ed8fee1SAlan Stern uhci_unlink_qh(uhci, qh); 14143ca2a321SAlan Stern if (qh->bandwidth_reserved) 14153ca2a321SAlan Stern uhci_release_bandwidth(uhci, qh); 14160ed8fee1SAlan Stern } 14170ed8fee1SAlan Stern } 14180ed8fee1SAlan Stern 14190ed8fee1SAlan Stern /* 14200ed8fee1SAlan Stern * Scan the URBs in a QH's queue 14210ed8fee1SAlan Stern */ 14220ed8fee1SAlan Stern #define QH_FINISHED_UNLINKING(qh) \ 14230ed8fee1SAlan Stern (qh->state == QH_STATE_UNLINKING && \ 14240ed8fee1SAlan Stern uhci->frame_number + uhci->is_stopped != qh->unlink_frame) 14250ed8fee1SAlan Stern 14267d12e780SDavid Howells static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) 14270ed8fee1SAlan Stern { 14280ed8fee1SAlan Stern struct urb_priv *urbp; 14290ed8fee1SAlan Stern struct urb *urb; 14300ed8fee1SAlan Stern int status; 14310ed8fee1SAlan Stern 14320ed8fee1SAlan Stern while (!list_empty(&qh->queue)) { 14330ed8fee1SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node); 14340ed8fee1SAlan Stern urb = urbp->urb; 14350ed8fee1SAlan Stern 1436b1869000SAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC) 14370ed8fee1SAlan Stern status = uhci_result_isochronous(uhci, urb); 1438b1869000SAlan Stern else 14390ed8fee1SAlan Stern status = uhci_result_common(uhci, urb); 14400ed8fee1SAlan Stern if (status == -EINPROGRESS) 14410ed8fee1SAlan Stern break; 14420ed8fee1SAlan Stern 14430ed8fee1SAlan Stern spin_lock(&urb->lock); 14440ed8fee1SAlan Stern if (urb->status == -EINPROGRESS) /* Not dequeued */ 14450ed8fee1SAlan Stern urb->status = status; 14460ed8fee1SAlan Stern else 14472775562aSAlan Stern status = ECONNRESET; /* Not -ECONNRESET */ 14480ed8fee1SAlan Stern spin_unlock(&urb->lock); 14490ed8fee1SAlan Stern 14500ed8fee1SAlan Stern /* Dequeued but completed URBs can't be given back unless 14510ed8fee1SAlan Stern * the QH is stopped or has finished unlinking. */ 14522775562aSAlan Stern if (status == ECONNRESET) { 14532775562aSAlan Stern if (QH_FINISHED_UNLINKING(qh)) 14542775562aSAlan Stern qh->is_stopped = 1; 14552775562aSAlan Stern else if (!qh->is_stopped) 14560ed8fee1SAlan Stern return; 14572775562aSAlan Stern } 14580ed8fee1SAlan Stern 14597d12e780SDavid Howells uhci_giveback_urb(uhci, qh, urb); 14607ceb932fSAlan Stern if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC) 14610ed8fee1SAlan Stern break; 14620ed8fee1SAlan Stern } 14630ed8fee1SAlan Stern 14640ed8fee1SAlan Stern /* If the QH is neither stopped nor finished unlinking (normal case), 14650ed8fee1SAlan Stern * our work here is done. */ 14662775562aSAlan Stern if (QH_FINISHED_UNLINKING(qh)) 14672775562aSAlan Stern qh->is_stopped = 1; 14682775562aSAlan Stern else if (!qh->is_stopped) 14690ed8fee1SAlan Stern return; 14700ed8fee1SAlan Stern 14710ed8fee1SAlan Stern /* Otherwise give back each of the dequeued URBs */ 14722775562aSAlan Stern restart: 14730ed8fee1SAlan Stern list_for_each_entry(urbp, &qh->queue, node) { 14740ed8fee1SAlan Stern urb = urbp->urb; 14750ed8fee1SAlan Stern if (urb->status != -EINPROGRESS) { 147610b8e47dSAlan Stern 147710b8e47dSAlan Stern /* Fix up the TD links and save the toggles for 147810b8e47dSAlan Stern * non-Isochronous queues. For Isochronous queues, 147910b8e47dSAlan Stern * test for too-recent dequeues. */ 148010b8e47dSAlan Stern if (!uhci_cleanup_queue(uhci, qh, urb)) { 148110b8e47dSAlan Stern qh->is_stopped = 0; 148210b8e47dSAlan Stern return; 148310b8e47dSAlan Stern } 14847d12e780SDavid Howells uhci_giveback_urb(uhci, qh, urb); 14850ed8fee1SAlan Stern goto restart; 14860ed8fee1SAlan Stern } 14870ed8fee1SAlan Stern } 14880ed8fee1SAlan Stern qh->is_stopped = 0; 14890ed8fee1SAlan Stern 14900ed8fee1SAlan Stern /* There are no more dequeued URBs. If there are still URBs on the 14910ed8fee1SAlan Stern * queue, the QH can now be re-activated. */ 14920ed8fee1SAlan Stern if (!list_empty(&qh->queue)) { 14930ed8fee1SAlan Stern if (qh->needs_fixup) 14940ed8fee1SAlan Stern uhci_fixup_toggles(qh, 0); 149584afddd7SAlan Stern 149684afddd7SAlan Stern /* If the first URB on the queue wants FSBR but its time 149784afddd7SAlan Stern * limit has expired, set the next TD to interrupt on 149884afddd7SAlan Stern * completion before reactivating the QH. */ 149984afddd7SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node); 150084afddd7SAlan Stern if (urbp->fsbr && qh->wait_expired) { 150184afddd7SAlan Stern struct uhci_td *td = list_entry(urbp->td_list.next, 150284afddd7SAlan Stern struct uhci_td, list); 150384afddd7SAlan Stern 150484afddd7SAlan Stern td->status |= __cpu_to_le32(TD_CTRL_IOC); 150584afddd7SAlan Stern } 150684afddd7SAlan Stern 15070ed8fee1SAlan Stern uhci_activate_qh(uhci, qh); 15080ed8fee1SAlan Stern } 15090ed8fee1SAlan Stern 15100ed8fee1SAlan Stern /* The queue is empty. The QH can become idle if it is fully 15110ed8fee1SAlan Stern * unlinked. */ 15120ed8fee1SAlan Stern else if (QH_FINISHED_UNLINKING(qh)) 15130ed8fee1SAlan Stern uhci_make_qh_idle(uhci, qh); 15141da177e4SLinus Torvalds } 15151da177e4SLinus Torvalds 15160ed8fee1SAlan Stern /* 151784afddd7SAlan Stern * Check for queues that have made some forward progress. 151884afddd7SAlan Stern * Returns 0 if the queue is not Isochronous, is ACTIVE, and 151984afddd7SAlan Stern * has not advanced since last examined; 1 otherwise. 1520b761d9d8SAlan Stern * 1521b761d9d8SAlan Stern * Early Intel controllers have a bug which causes qh->element sometimes 1522b761d9d8SAlan Stern * not to advance when a TD completes successfully. The queue remains 1523b761d9d8SAlan Stern * stuck on the inactive completed TD. We detect such cases and advance 1524b761d9d8SAlan Stern * the element pointer by hand. 152584afddd7SAlan Stern */ 152684afddd7SAlan Stern static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) 152784afddd7SAlan Stern { 152884afddd7SAlan Stern struct urb_priv *urbp = NULL; 152984afddd7SAlan Stern struct uhci_td *td; 153084afddd7SAlan Stern int ret = 1; 153184afddd7SAlan Stern unsigned status; 153284afddd7SAlan Stern 153384afddd7SAlan Stern if (qh->type == USB_ENDPOINT_XFER_ISOC) 1534c5e3b741SAlan Stern goto done; 153584afddd7SAlan Stern 153684afddd7SAlan Stern /* Treat an UNLINKING queue as though it hasn't advanced. 153784afddd7SAlan Stern * This is okay because reactivation will treat it as though 153884afddd7SAlan Stern * it has advanced, and if it is going to become IDLE then 153984afddd7SAlan Stern * this doesn't matter anyway. Furthermore it's possible 154084afddd7SAlan Stern * for an UNLINKING queue not to have any URBs at all, or 154184afddd7SAlan Stern * for its first URB not to have any TDs (if it was dequeued 154284afddd7SAlan Stern * just as it completed). So it's not easy in any case to 154384afddd7SAlan Stern * test whether such queues have advanced. */ 154484afddd7SAlan Stern if (qh->state != QH_STATE_ACTIVE) { 154584afddd7SAlan Stern urbp = NULL; 154684afddd7SAlan Stern status = 0; 154784afddd7SAlan Stern 154884afddd7SAlan Stern } else { 154984afddd7SAlan Stern urbp = list_entry(qh->queue.next, struct urb_priv, node); 155084afddd7SAlan Stern td = list_entry(urbp->td_list.next, struct uhci_td, list); 155184afddd7SAlan Stern status = td_status(td); 155284afddd7SAlan Stern if (!(status & TD_CTRL_ACTIVE)) { 155384afddd7SAlan Stern 155484afddd7SAlan Stern /* We're okay, the queue has advanced */ 155584afddd7SAlan Stern qh->wait_expired = 0; 155684afddd7SAlan Stern qh->advance_jiffies = jiffies; 1557c5e3b741SAlan Stern goto done; 155884afddd7SAlan Stern } 155984afddd7SAlan Stern ret = 0; 156084afddd7SAlan Stern } 156184afddd7SAlan Stern 156284afddd7SAlan Stern /* The queue hasn't advanced; check for timeout */ 1563c5e3b741SAlan Stern if (qh->wait_expired) 1564c5e3b741SAlan Stern goto done; 1565c5e3b741SAlan Stern 1566c5e3b741SAlan Stern if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { 1567b761d9d8SAlan Stern 1568b761d9d8SAlan Stern /* Detect the Intel bug and work around it */ 1569b761d9d8SAlan Stern if (qh->post_td && qh_element(qh) == 1570b761d9d8SAlan Stern cpu_to_le32(qh->post_td->dma_handle)) { 1571b761d9d8SAlan Stern qh->element = qh->post_td->link; 1572b761d9d8SAlan Stern qh->advance_jiffies = jiffies; 1573c5e3b741SAlan Stern ret = 1; 1574c5e3b741SAlan Stern goto done; 1575b761d9d8SAlan Stern } 1576b761d9d8SAlan Stern 157784afddd7SAlan Stern qh->wait_expired = 1; 157884afddd7SAlan Stern 157984afddd7SAlan Stern /* If the current URB wants FSBR, unlink it temporarily 158084afddd7SAlan Stern * so that we can safely set the next TD to interrupt on 158184afddd7SAlan Stern * completion. That way we'll know as soon as the queue 158284afddd7SAlan Stern * starts moving again. */ 158384afddd7SAlan Stern if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC)) 158484afddd7SAlan Stern uhci_unlink_qh(uhci, qh); 1585c5e3b741SAlan Stern 1586c5e3b741SAlan Stern } else { 1587c5e3b741SAlan Stern /* Unmoving but not-yet-expired queues keep FSBR alive */ 1588c5e3b741SAlan Stern if (urbp) 1589c5e3b741SAlan Stern uhci_urbp_wants_fsbr(uhci, urbp); 159084afddd7SAlan Stern } 1591c5e3b741SAlan Stern 1592c5e3b741SAlan Stern done: 159384afddd7SAlan Stern return ret; 159484afddd7SAlan Stern } 159584afddd7SAlan Stern 159684afddd7SAlan Stern /* 15970ed8fee1SAlan Stern * Process events in the schedule, but only in one thread at a time 15980ed8fee1SAlan Stern */ 15997d12e780SDavid Howells static void uhci_scan_schedule(struct uhci_hcd *uhci) 16001da177e4SLinus Torvalds { 16010ed8fee1SAlan Stern int i; 16020ed8fee1SAlan Stern struct uhci_qh *qh; 16031da177e4SLinus Torvalds 16041da177e4SLinus Torvalds /* Don't allow re-entrant calls */ 16051da177e4SLinus Torvalds if (uhci->scan_in_progress) { 16061da177e4SLinus Torvalds uhci->need_rescan = 1; 16071da177e4SLinus Torvalds return; 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds uhci->scan_in_progress = 1; 16101da177e4SLinus Torvalds rescan: 16111da177e4SLinus Torvalds uhci->need_rescan = 0; 1612c5e3b741SAlan Stern uhci->fsbr_is_wanted = 0; 16131da177e4SLinus Torvalds 16146c1b445cSAlan Stern uhci_clear_next_interrupt(uhci); 16151da177e4SLinus Torvalds uhci_get_current_frame_number(uhci); 1616c8155cc5SAlan Stern uhci->cur_iso_frame = uhci->frame_number; 16171da177e4SLinus Torvalds 16180ed8fee1SAlan Stern /* Go through all the QH queues and process the URBs in each one */ 16190ed8fee1SAlan Stern for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) { 16200ed8fee1SAlan Stern uhci->next_qh = list_entry(uhci->skelqh[i]->node.next, 16210ed8fee1SAlan Stern struct uhci_qh, node); 16220ed8fee1SAlan Stern while ((qh = uhci->next_qh) != uhci->skelqh[i]) { 16230ed8fee1SAlan Stern uhci->next_qh = list_entry(qh->node.next, 16240ed8fee1SAlan Stern struct uhci_qh, node); 162584afddd7SAlan Stern 162684afddd7SAlan Stern if (uhci_advance_check(uhci, qh)) { 16277d12e780SDavid Howells uhci_scan_qh(uhci, qh); 1628c5e3b741SAlan Stern if (qh->state == QH_STATE_ACTIVE) { 1629c5e3b741SAlan Stern uhci_urbp_wants_fsbr(uhci, 1630c5e3b741SAlan Stern list_entry(qh->queue.next, struct urb_priv, node)); 1631c5e3b741SAlan Stern } 163284afddd7SAlan Stern } 16331da177e4SLinus Torvalds } 16340ed8fee1SAlan Stern } 16351da177e4SLinus Torvalds 1636c8155cc5SAlan Stern uhci->last_iso_frame = uhci->cur_iso_frame; 16371da177e4SLinus Torvalds if (uhci->need_rescan) 16381da177e4SLinus Torvalds goto rescan; 16391da177e4SLinus Torvalds uhci->scan_in_progress = 0; 16401da177e4SLinus Torvalds 1641c5e3b741SAlan Stern if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted && 1642c5e3b741SAlan Stern !uhci->fsbr_expiring) { 1643c5e3b741SAlan Stern uhci->fsbr_expiring = 1; 1644c5e3b741SAlan Stern mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY); 1645c5e3b741SAlan Stern } 164684afddd7SAlan Stern 164704538a25SAlan Stern if (list_empty(&uhci->skel_unlink_qh->node)) 16481da177e4SLinus Torvalds uhci_clear_next_interrupt(uhci); 16491da177e4SLinus Torvalds else 16501da177e4SLinus Torvalds uhci_set_next_interrupt(uhci); 16511da177e4SLinus Torvalds } 1652