/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			DBG(3, "Host TX FIFONOTEMPTY csr: %04x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
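
/*
 * Bookkeeping note (added summary of the two helpers above): each
 * hardware endpoint tracks at most one active qh per direction.  For a
 * shared FIFO the two directions can't be active at once, so both
 * in_qh and out_qh are pointed at the same qh, and musb_ep_get_qh()
 * returns the right qh whichever direction the caller asks about.
 */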

/*
 * Start the URB at the front of an endpoint's queue; the endpoint must
 * already be claimed by the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");
"dma" : "pio"); 290 291 if (!hw_ep->tx_channel) 292 musb_h_tx_start(hw_ep); 293 else if (is_cppi_enabled() || tusb_dma_omap()) 294 musb_h_tx_dma_start(hw_ep); 295 } 296 } 297 298 /* Context: caller owns controller lock, IRQs are blocked */ 299 static void musb_giveback(struct musb *musb, struct urb *urb, int status) 300 __releases(musb->lock) 301 __acquires(musb->lock) 302 { 303 DBG(({ int level; switch (status) { 304 case 0: 305 level = 4; 306 break; 307 /* common/boring faults */ 308 case -EREMOTEIO: 309 case -ESHUTDOWN: 310 case -ECONNRESET: 311 case -EPIPE: 312 level = 3; 313 break; 314 default: 315 level = 2; 316 break; 317 }; level; }), 318 "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", 319 urb, urb->complete, status, 320 usb_pipedevice(urb->pipe), 321 usb_pipeendpoint(urb->pipe), 322 usb_pipein(urb->pipe) ? "in" : "out", 323 urb->actual_length, urb->transfer_buffer_length 324 ); 325 326 usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); 327 spin_unlock(&musb->lock); 328 usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); 329 spin_lock(&musb->lock); 330 } 331 332 /* For bulk/interrupt endpoints only */ 333 static inline void musb_save_toggle(struct musb_qh *qh, int is_in, 334 struct urb *urb) 335 { 336 void __iomem *epio = qh->hw_ep->regs; 337 u16 csr; 338 339 /* 340 * FIXME: the current Mentor DMA code seems to have 341 * problems getting toggle correct. 342 */ 343 344 if (is_in) 345 csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; 346 else 347 csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; 348 349 usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); 350 } 351 352 /* 353 * Advance this hardware endpoint's queue, completing the specified URB and 354 * advancing to either the next URB queued to that qh, or else invalidating 355 * that qh and advancing to the next qh scheduled after the current one. 356 * 357 * Context: caller owns controller lock, IRQs are blocked 358 */ 359 static void musb_advance_schedule(struct musb *musb, struct urb *urb, 360 struct musb_hw_ep *hw_ep, int is_in) 361 { 362 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); 363 struct musb_hw_ep *ep = qh->hw_ep; 364 int ready = qh->is_ready; 365 int status; 366 367 status = (urb->status == -EINPROGRESS) ? 0 : urb->status; 368 369 /* save toggle eagerly, for paranoia */ 370 switch (qh->type) { 371 case USB_ENDPOINT_XFER_BULK: 372 case USB_ENDPOINT_XFER_INT: 373 musb_save_toggle(qh, is_in, urb); 374 break; 375 case USB_ENDPOINT_XFER_ISOC: 376 if (status == 0 && urb->error_count) 377 status = -EXDEV; 378 break; 379 } 380 381 qh->is_ready = 0; 382 musb_giveback(musb, urb, status); 383 qh->is_ready = ready; 384 385 /* reclaim resources (and bandwidth) ASAP; deschedule it, and 386 * invalidate qh as soon as list_empty(&hep->urb_list) 387 */ 388 if (list_empty(&qh->hep->urb_list)) { 389 struct list_head *head; 390 391 if (is_in) 392 ep->rx_reinit = 1; 393 else 394 ep->tx_reinit = 1; 395 396 /* Clobber old pointers to this qh */ 397 musb_ep_set_qh(ep, is_in, NULL); 398 qh->hep->hcpriv = NULL; 399 400 switch (qh->type) { 401 402 case USB_ENDPOINT_XFER_CONTROL: 403 case USB_ENDPOINT_XFER_BULK: 404 /* fifo policy for these lists, except that NAKing 405 * should rotate a qh to the end (for fairness). 

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
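
/*
 * Summary of the PIO RX path below: RXCOUNT is sampled once, the FIFO
 * is unloaded into either the current iso_frame_desc or the linear
 * buffer at qh->offset, and a packet longer than the space remaining
 * is reported as -EOVERFLOW (the excess is flushed rather than spilled
 * into the next buffer).  A short packet, a full buffer, or an already
 * faulted URB status each terminate the transfer.
 */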

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->hwvers < MUSB_HWVERS_2000)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
			    qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
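
/*
 * TX DMA programming in brief, as implemented below: with Mentor
 * ("Inventra") DMA, mode 1 moves multiple packets per interrupt
 * (DMAMODE + DMAENAB, plus AUTOSET when not high bandwidth so the
 * hardware raises TXPKTRDY itself), while mode 0 moves one packet and
 * leaves TXPKTRDY to the interrupt handler.  CPPI and TUSB OMAP DMA
 * choose their mode internally and only need the zero-length-
 * terminator hint derived from URB_ZERO_PACKET.
 */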

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma + offset,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}


/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
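
/*
 * The ep0_stage values walked above form a small state machine:
 *
 *	IDLE -> START (SETUP queued) -> IN or OUT (data, iff wLength)
 *	     -> STATUS (opposite-direction handshake) -> IDLE
 *
 * musb_h_ep0_continue() only moves data and says whether more packets
 * are expected; musb_h_ep0_irq() below drives the STATUS and IDLE
 * transitions.
 */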

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
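
/*
 * Rough control flow of the irq handler below -- one reading of the
 * code, recorded here for orientation only:
 *
 *	error bits set?		abort, flush, give the URB back
 *	DMA still busy?		spurious irq, ignore
 *	DMAMODE still set?	switch to mode 0 and wait for the final
 *				TXPKTRDY irq before calling it done
 *	FIFO not empty?		last packet still in flight, come back
 *	otherwise		advance the URB (DMA case) or PIO the
 *				next packet
 */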
1097 */ 1098 1099 #endif 1100 1101 /* Service a Tx-Available or dma completion irq for the endpoint */ 1102 void musb_host_tx(struct musb *musb, u8 epnum) 1103 { 1104 int pipe; 1105 bool done = false; 1106 u16 tx_csr; 1107 size_t length = 0; 1108 size_t offset = 0; 1109 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1110 void __iomem *epio = hw_ep->regs; 1111 struct musb_qh *qh = hw_ep->out_qh; 1112 struct urb *urb = next_urb(qh); 1113 u32 status = 0; 1114 void __iomem *mbase = musb->mregs; 1115 struct dma_channel *dma; 1116 1117 musb_ep_select(mbase, epnum); 1118 tx_csr = musb_readw(epio, MUSB_TXCSR); 1119 1120 /* with CPPI, DMA sometimes triggers "extra" irqs */ 1121 if (!urb) { 1122 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); 1123 return; 1124 } 1125 1126 pipe = urb->pipe; 1127 dma = is_dma_capable() ? hw_ep->tx_channel : NULL; 1128 DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, 1129 dma ? ", dma" : ""); 1130 1131 /* check for errors */ 1132 if (tx_csr & MUSB_TXCSR_H_RXSTALL) { 1133 /* dma was disabled, fifo flushed */ 1134 DBG(3, "TX end %d stall\n", epnum); 1135 1136 /* stall; record URB status */ 1137 status = -EPIPE; 1138 1139 } else if (tx_csr & MUSB_TXCSR_H_ERROR) { 1140 /* (NON-ISO) dma was disabled, fifo flushed */ 1141 DBG(3, "TX 3strikes on ep=%d\n", epnum); 1142 1143 status = -ETIMEDOUT; 1144 1145 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { 1146 DBG(6, "TX end=%d device not responding\n", epnum); 1147 1148 /* NOTE: this code path would be a good place to PAUSE a 1149 * transfer, if there's some other (nonperiodic) tx urb 1150 * that could use this fifo. (dma complicates it...) 1151 * That's already done for bulk RX transfers. 1152 * 1153 * if (bulk && qh->ring.next != &musb->out_bulk), then 1154 * we have a candidate... NAKing is *NOT* an error 1155 */ 1156 musb_ep_select(mbase, epnum); 1157 musb_writew(epio, MUSB_TXCSR, 1158 MUSB_TXCSR_H_WZC_BITS 1159 | MUSB_TXCSR_TXPKTRDY); 1160 return; 1161 } 1162 1163 if (status) { 1164 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1165 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1166 (void) musb->dma_controller->channel_abort(dma); 1167 } 1168 1169 /* do the proper sequence to abort the transfer in the 1170 * usb core; the dma engine should already be stopped. 1171 */ 1172 musb_h_tx_flush_fifo(hw_ep); 1173 tx_csr &= ~(MUSB_TXCSR_AUTOSET 1174 | MUSB_TXCSR_DMAENAB 1175 | MUSB_TXCSR_H_ERROR 1176 | MUSB_TXCSR_H_RXSTALL 1177 | MUSB_TXCSR_H_NAKTIMEOUT 1178 ); 1179 1180 musb_ep_select(mbase, epnum); 1181 musb_writew(epio, MUSB_TXCSR, tx_csr); 1182 /* REVISIT may need to clear FLUSHFIFO ... */ 1183 musb_writew(epio, MUSB_TXCSR, tx_csr); 1184 musb_writeb(epio, MUSB_TXINTERVAL, 0); 1185 1186 done = true; 1187 } 1188 1189 /* second cppi case */ 1190 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1191 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); 1192 return; 1193 } 1194 1195 if (is_dma_capable() && dma && !status) { 1196 /* 1197 * DMA has completed. But if we're using DMA mode 1 (multi 1198 * packet DMA), we need a terminal TXPKTRDY interrupt before 1199 * we can consider this transfer completed, lest we trash 1200 * its last packet when writing the next URB's data. So we 1201 * switch back to mode 0 to get that interrupt; we'll come 1202 * back here once it happens. 1203 */ 1204 if (tx_csr & MUSB_TXCSR_DMAMODE) { 1205 /* 1206 * We shouldn't clear DMAMODE with DMAENAB set; so 1207 * clear them in a safe order. 
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
					"CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if (usb_pipeisoc(pipe) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		DBG(1, "not complete, but DMA enabled?\n");
		return;
	}
1322 * 1323 * REVISIT: some docs say that when hw_ep->tx_double_buffered, 1324 * (and presumably, FIFO is not half-full) we should write *two* 1325 * packets before updating TXCSR; other docs disagree... 1326 */ 1327 if (length > qh->maxpacket) 1328 length = qh->maxpacket; 1329 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); 1330 qh->segsize = length; 1331 1332 musb_ep_select(mbase, epnum); 1333 musb_writew(epio, MUSB_TXCSR, 1334 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); 1335 } 1336 1337 1338 #ifdef CONFIG_USB_INVENTRA_DMA 1339 1340 /* Host side RX (IN) using Mentor DMA works as follows: 1341 submit_urb -> 1342 - if queue was empty, ProgramEndpoint 1343 - first IN token is sent out (by setting ReqPkt) 1344 LinuxIsr -> RxReady() 1345 /\ => first packet is received 1346 | - Set in mode 0 (DmaEnab, ~ReqPkt) 1347 | -> DMA Isr (transfer complete) -> RxReady() 1348 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) 1349 | - if urb not complete, send next IN token (ReqPkt) 1350 | | else complete urb. 1351 | | 1352 --------------------------- 1353 * 1354 * Nuances of mode 1: 1355 * For short packets, no ack (+RxPktRdy) is sent automatically 1356 * (even if AutoClear is ON) 1357 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent 1358 * automatically => major problem, as collecting the next packet becomes 1359 * difficult. Hence mode 1 is not used. 1360 * 1361 * REVISIT 1362 * All we care about at this driver level is that 1363 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; 1364 * (b) termination conditions are: short RX, or buffer full; 1365 * (c) fault modes include 1366 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. 1367 * (and that endpoint's dma queue stops immediately) 1368 * - overflow (full, PLUS more bytes in the terminal packet) 1369 * 1370 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would 1371 * thus be a great candidate for using mode 1 ... for all but the 1372 * last packet of one URB's transfer. 1373 */ 1374 1375 #endif 1376 1377 /* Schedule next QH from musb->in_bulk and move the current qh to 1378 * the end; avoids starvation for other endpoints. 1379 */ 1380 static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep) 1381 { 1382 struct dma_channel *dma; 1383 struct urb *urb; 1384 void __iomem *mbase = musb->mregs; 1385 void __iomem *epio = ep->regs; 1386 struct musb_qh *cur_qh, *next_qh; 1387 u16 rx_csr; 1388 1389 musb_ep_select(mbase, ep->epnum); 1390 dma = is_dma_capable() ? 

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}
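
/*
 * The round-robin above only runs when bulk reads are multiplexed on
 * the shared bulk endpoint (qh->mux == 1 and more than one qh queued),
 * and it relies on the NAK limit programmed in musb_schedule():
 * RXINTERVAL of 8 (128 uframes, 16 ms) for high speed or 4 (8 frames,
 * 8 ms) for full speed bounds how long one NAKing device may hold the
 * endpoint before the queue rotates.
 */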
1493 */ 1494 if (usb_pipebulk(urb->pipe) 1495 && qh->mux == 1 1496 && !list_is_singular(&musb->in_bulk)) { 1497 musb_bulk_rx_nak_timeout(musb, hw_ep); 1498 return; 1499 } 1500 musb_ep_select(mbase, epnum); 1501 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1502 rx_csr &= ~MUSB_RXCSR_DATAERROR; 1503 musb_writew(epio, MUSB_RXCSR, rx_csr); 1504 1505 goto finish; 1506 } else { 1507 DBG(4, "RX end %d ISO data error\n", epnum); 1508 /* packet error reported later */ 1509 iso_err = true; 1510 } 1511 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { 1512 DBG(3, "end %d high bandwidth incomplete ISO packet RX\n", 1513 epnum); 1514 status = -EPROTO; 1515 } 1516 1517 /* faults abort the transfer */ 1518 if (status) { 1519 /* clean up dma and collect transfer count */ 1520 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1521 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1522 (void) musb->dma_controller->channel_abort(dma); 1523 xfer_len = dma->actual_len; 1524 } 1525 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); 1526 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1527 done = true; 1528 goto finish; 1529 } 1530 1531 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { 1532 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ 1533 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); 1534 goto finish; 1535 } 1536 1537 /* thorough shutdown for now ... given more precise fault handling 1538 * and better queueing support, we might keep a DMA pipeline going 1539 * while processing this irq for earlier completions. 1540 */ 1541 1542 /* FIXME this is _way_ too much in-line logic for Mentor DMA */ 1543 1544 #ifndef CONFIG_USB_INVENTRA_DMA 1545 if (rx_csr & MUSB_RXCSR_H_REQPKT) { 1546 /* REVISIT this happened for a while on some short reads... 1547 * the cleanup still needs investigation... looks bad... 1548 * and also duplicates dma cleanup code above ... plus, 1549 * shouldn't this be the "half full" double buffer case? 1550 */ 1551 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1552 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1553 (void) musb->dma_controller->channel_abort(dma); 1554 xfer_len = dma->actual_len; 1555 done = true; 1556 } 1557 1558 DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, 1559 xfer_len, dma ? 
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",
						rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
1683 */ 1684 if ((urb->transfer_flags & 1685 URB_SHORT_NOT_OK) 1686 && (urb->transfer_buffer_length - 1687 urb->actual_length) 1688 > qh->maxpacket) 1689 dma->desired_mode = 1; 1690 if (rx_count < hw_ep->max_packet_sz_rx) { 1691 length = rx_count; 1692 dma->desired_mode = 0; 1693 } else { 1694 length = urb->transfer_buffer_length; 1695 } 1696 #endif 1697 1698 /* Disadvantage of using mode 1: 1699 * It's basically usable only for mass storage class; essentially all 1700 * other protocols also terminate transfers on short packets. 1701 * 1702 * Details: 1703 * An extra IN token is sent at the end of the transfer (due to AUTOREQ) 1704 * If you try to use mode 1 for (transfer_buffer_length - 512), and try 1705 * to use the extra IN token to grab the last packet using mode 0, then 1706 * the problem is that you cannot be sure when the device will send the 1707 * last packet and RxPktRdy set. Sometimes the packet is recd too soon 1708 * such that it gets lost when RxCSR is re-set at the end of the mode 1 1709 * transfer, while sometimes it is recd just a little late so that if you 1710 * try to configure for mode 0 soon after the mode 1 transfer is 1711 * completed, you will find rxcount 0. Okay, so you might think why not 1712 * wait for an interrupt when the pkt is recd. Well, you won't get any! 1713 */ 1714 1715 val = musb_readw(epio, MUSB_RXCSR); 1716 val &= ~MUSB_RXCSR_H_REQPKT; 1717 1718 if (dma->desired_mode == 0) 1719 val &= ~MUSB_RXCSR_H_AUTOREQ; 1720 else 1721 val |= MUSB_RXCSR_H_AUTOREQ; 1722 val |= MUSB_RXCSR_DMAENAB; 1723 1724 /* autoclear shouldn't be set in high bandwidth */ 1725 if (qh->hb_mult == 1) 1726 val |= MUSB_RXCSR_AUTOCLEAR; 1727 1728 musb_writew(epio, MUSB_RXCSR, 1729 MUSB_RXCSR_H_WZC_BITS | val); 1730 1731 /* REVISIT if when actual_length != 0, 1732 * transfer_buffer_length needs to be 1733 * adjusted first... 1734 */ 1735 ret = c->channel_program( 1736 dma, qh->maxpacket, 1737 dma->desired_mode, buf, length); 1738 1739 if (!ret) { 1740 c->channel_release(dma); 1741 hw_ep->rx_channel = NULL; 1742 dma = NULL; 1743 /* REVISIT reset CSR */ 1744 } 1745 } 1746 #endif /* Mentor DMA */ 1747 1748 if (!dma) { 1749 done = musb_host_packet_rx(musb, urb, 1750 epnum, iso_err); 1751 DBG(6, "read %spacket\n", done ? "last " : ""); 1752 } 1753 } 1754 1755 finish: 1756 urb->actual_length += xfer_len; 1757 qh->offset += xfer_len; 1758 if (done) { 1759 if (urb->status == -EINPROGRESS) 1760 urb->status = status; 1761 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); 1762 } 1763 } 1764 1765 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. 1766 * the software schedule associates multiple such nodes with a given 1767 * host side hardware endpoint + direction; scheduling may activate 1768 * that hardware endpoint. 1769 */ 1770 static int musb_schedule( 1771 struct musb *musb, 1772 struct musb_qh *qh, 1773 int is_in) 1774 { 1775 int idle; 1776 int best_diff; 1777 int best_end, epnum; 1778 struct musb_hw_ep *hw_ep = NULL; 1779 struct list_head *head = NULL; 1780 u8 toggle; 1781 u8 txtype; 1782 struct urb *urb = next_urb(qh); 1783 1784 /* use fixed hardware for control and bulk */ 1785 if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1786 head = &musb->control; 1787 hw_ep = musb->control_ep; 1788 goto success; 1789 } 1790 1791 /* else, periodic transfers get muxed to other endpoints */ 1792 1793 /* 1794 * We know this qh hasn't been scheduled, so all we need to do 1795 * is choose which hardware endpoint to put it on ... 
1796 * 1797 * REVISIT what we really want here is a regular schedule tree 1798 * like e.g. OHCI uses. 1799 */ 1800 best_diff = 4096; 1801 best_end = -1; 1802 1803 for (epnum = 1, hw_ep = musb->endpoints + 1; 1804 epnum < musb->nr_endpoints; 1805 epnum++, hw_ep++) { 1806 int diff; 1807 1808 if (musb_ep_get_qh(hw_ep, is_in) != NULL) 1809 continue; 1810 1811 if (hw_ep == musb->bulk_ep) 1812 continue; 1813 1814 if (is_in) 1815 diff = hw_ep->max_packet_sz_rx; 1816 else 1817 diff = hw_ep->max_packet_sz_tx; 1818 diff -= (qh->maxpacket * qh->hb_mult); 1819 1820 if (diff >= 0 && best_diff > diff) { 1821 1822 /* 1823 * Mentor controller has a bug in that if we schedule 1824 * a BULK Tx transfer on an endpoint that had earlier 1825 * handled ISOC then the BULK transfer has to start on 1826 * a zero toggle. If the BULK transfer starts on a 1 1827 * toggle then this transfer will fail as the mentor 1828 * controller starts the Bulk transfer on a 0 toggle 1829 * irrespective of the programming of the toggle bits 1830 * in the TXCSR register. Check for this condition 1831 * while allocating the EP for a Tx Bulk transfer. If 1832 * so skip this EP. 1833 */ 1834 hw_ep = musb->endpoints + epnum; 1835 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); 1836 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) 1837 >> 4) & 0x3; 1838 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && 1839 toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) 1840 continue; 1841 1842 best_diff = diff; 1843 best_end = epnum; 1844 } 1845 } 1846 /* use bulk reserved ep1 if no other ep is free */ 1847 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { 1848 hw_ep = musb->bulk_ep; 1849 if (is_in) 1850 head = &musb->in_bulk; 1851 else 1852 head = &musb->out_bulk; 1853 1854 /* Enable bulk RX NAK timeout scheme when bulk requests are 1855 * multiplexed. This scheme doen't work in high speed to full 1856 * speed scenario as NAK interrupts are not coming from a 1857 * full speed device connected to a high speed device. 1858 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and 1859 * 4 (8 frame or 8ms) for FS device. 1860 */ 1861 if (is_in && qh->dev) 1862 qh->intv_reg = 1863 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; 1864 goto success; 1865 } else if (best_end < 0) { 1866 return -ENOSPC; 1867 } 1868 1869 idle = 1; 1870 qh->mux = 0; 1871 hw_ep = musb->endpoints + best_end; 1872 DBG(4, "qh %p periodic slot %d\n", qh, best_end); 1873 success: 1874 if (head) { 1875 idle = list_empty(head); 1876 list_add_tail(&qh->ring, head); 1877 qh->mux = 1; 1878 } 1879 qh->hw_ep = hw_ep; 1880 qh->hep->hcpriv = qh; 1881 if (idle) 1882 musb_start_urb(musb, is_in, qh); 1883 return 0; 1884 } 1885 1886 static int musb_urb_enqueue( 1887 struct usb_hcd *hcd, 1888 struct urb *urb, 1889 gfp_t mem_flags) 1890 { 1891 unsigned long flags; 1892 struct musb *musb = hcd_to_musb(hcd); 1893 struct usb_host_endpoint *hep = urb->ep; 1894 struct musb_qh *qh; 1895 struct usb_endpoint_descriptor *epd = &hep->desc; 1896 int ret; 1897 unsigned type_reg; 1898 unsigned interval; 1899 1900 /* host role must be active */ 1901 if (!is_host_active(musb) || !musb->is_active) 1902 return -ENODEV; 1903 1904 spin_lock_irqsave(&musb->lock, flags); 1905 ret = usb_hcd_link_urb_to_ep(hcd, urb); 1906 qh = ret ? NULL : hep->hcpriv; 1907 if (qh) 1908 urb->hcpriv = qh; 1909 spin_unlock_irqrestore(&musb->lock, flags); 1910 1911 /* DMA mapping was already done, if needed, and this urb is on 1912 * hep->urb_list now ... so we're done, unless hep wasn't yet 1913 * scheduled onto a live qh. 
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode the high-bandwidth
	 * multiplier.  Some musb cores don't support high bandwidth ISO
	 * transfers, and we don't (yet!) support high bandwidth interrupt
	 * transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* precompute the RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
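		/* For reference: judging from the values used above in
		 * musb_schedule() for multiplexed bulk-IN, the NAK-limit
		 * encoding appears logarithmic, a register value m giving
		 * 2^(m-1) (micro)frames -- so 8 -> 128 uframes (16 ms) at
		 * high speed, and 4 -> 8 frames (8 ms) at full speed.
		 */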
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* Invariant: hep->hcpriv is NULL, or the qh that's already
	 * scheduled.  Until we get real DMA queues (with an entry for
	 * each urb/buffer), we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error-prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}


/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with the controller locked, irqs blocked.
 * That hardware queue advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ...
		 */
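		/* Note the same csr value is written a second time below
		 * and then read back; per the "flush cpu writebuffer"
		 * comment, the read-back ensures the clears have actually
		 * reached the core before the schedule advances.
		 */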
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort the current transfer, pending DMA, etc.;
	 * urb->status has already been updated.  This is a synchronous
	 * abort; it'd be OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
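		/* These URBs never reached the hardware, so a plain
		 * musb_giveback() is enough; only software bookkeeping
		 * needs unwinding before the qh itself is freed.
		 */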
2231 */ 2232 while (!list_empty(&hep->urb_list)) 2233 musb_giveback(musb, next_urb(qh), -ESHUTDOWN); 2234 2235 hep->hcpriv = NULL; 2236 list_del(&qh->ring); 2237 kfree(qh); 2238 } 2239 exit: 2240 spin_unlock_irqrestore(&musb->lock, flags); 2241 } 2242 2243 static int musb_h_get_frame_number(struct usb_hcd *hcd) 2244 { 2245 struct musb *musb = hcd_to_musb(hcd); 2246 2247 return musb_readw(musb->mregs, MUSB_FRAME); 2248 } 2249 2250 static int musb_h_start(struct usb_hcd *hcd) 2251 { 2252 struct musb *musb = hcd_to_musb(hcd); 2253 2254 /* NOTE: musb_start() is called when the hub driver turns 2255 * on port power, or when (OTG) peripheral starts. 2256 */ 2257 hcd->state = HC_STATE_RUNNING; 2258 musb->port1_status = 0; 2259 return 0; 2260 } 2261 2262 static void musb_h_stop(struct usb_hcd *hcd) 2263 { 2264 musb_stop(hcd_to_musb(hcd)); 2265 hcd->state = HC_STATE_HALT; 2266 } 2267 2268 static int musb_bus_suspend(struct usb_hcd *hcd) 2269 { 2270 struct musb *musb = hcd_to_musb(hcd); 2271 u8 devctl; 2272 2273 if (!is_host_active(musb)) 2274 return 0; 2275 2276 switch (musb->xceiv->state) { 2277 case OTG_STATE_A_SUSPEND: 2278 return 0; 2279 case OTG_STATE_A_WAIT_VRISE: 2280 /* ID could be grounded even if there's no device 2281 * on the other end of the cable. NOTE that the 2282 * A_WAIT_VRISE timers are messy with MUSB... 2283 */ 2284 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 2285 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 2286 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 2287 break; 2288 default: 2289 break; 2290 } 2291 2292 if (musb->is_active) { 2293 WARNING("trying to suspend as %s while active\n", 2294 otg_state_string(musb)); 2295 return -EBUSY; 2296 } else 2297 return 0; 2298 } 2299 2300 static int musb_bus_resume(struct usb_hcd *hcd) 2301 { 2302 /* resuming child port does the work */ 2303 return 0; 2304 } 2305 2306 const struct hc_driver musb_hc_driver = { 2307 .description = "musb-hcd", 2308 .product_desc = "MUSB HDRC host driver", 2309 .hcd_priv_size = sizeof(struct musb), 2310 .flags = HCD_USB2 | HCD_MEMORY, 2311 2312 /* not using irq handler or reset hooks from usbcore, since 2313 * those must be shared with peripheral code for OTG configs 2314 */ 2315 2316 .start = musb_h_start, 2317 .stop = musb_h_stop, 2318 2319 .get_frame_number = musb_h_get_frame_number, 2320 2321 .urb_enqueue = musb_urb_enqueue, 2322 .urb_dequeue = musb_urb_dequeue, 2323 .endpoint_disable = musb_h_disable, 2324 2325 .hub_status_data = musb_hub_status_data, 2326 .hub_control = musb_hub_control, 2327 .bus_suspend = musb_bus_suspend, 2328 .bus_resume = musb_bus_resume, 2329 /* .start_port_reset = NULL, */ 2330 /* .hub_irq_enable = NULL, */ 2331 }; 2332