1 /* 2 * MUSB OTG driver host support 3 * 4 * Copyright 2005 Mentor Graphics Corporation 5 * Copyright (C) 2005-2006 by Texas Instruments 6 * Copyright (C) 2006-2007 Nokia Corporation 7 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * version 2 as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 21 * 02110-1301 USA 22 * 23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 */ 35 36 #include <linux/module.h> 37 #include <linux/kernel.h> 38 #include <linux/delay.h> 39 #include <linux/sched.h> 40 #include <linux/slab.h> 41 #include <linux/errno.h> 42 #include <linux/list.h> 43 #include <linux/dma-mapping.h> 44 45 #include "musb_core.h" 46 #include "musb_host.h" 47 #include "musb_trace.h" 48 49 /* MUSB HOST status 22-mar-2006 50 * 51 * - There's still lots of partial code duplication for fault paths, so 52 * they aren't handled as consistently as they need to be. 53 * 54 * - PIO mostly behaved when last tested. 55 * + including ep0, with all usbtest cases 9, 10 56 * + usbtest 14 (ep0out) doesn't seem to run at all 57 * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest 58 * configurations, but otherwise double buffering passes basic tests. 59 * + for 2.6.N, for N > ~10, needs API changes for hcd framework. 60 * 61 * - DMA (CPPI) ... partially behaves, not currently recommended 62 * + about 1/15 the speed of typical EHCI implementations (PCI) 63 * + RX, all too often reqpkt seems to misbehave after tx 64 * + TX, no known issues (other than evident silicon issue) 65 * 66 * - DMA (Mentor/OMAP) ...has at least toggle update problems 67 * 68 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet 69 * starvation ... nothing yet for TX, interrupt, or bulk. 70 * 71 * - Not tested with HNP, but some SRP paths seem to behave. 72 * 73 * NOTE 24-August-2006: 74 * 75 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an 76 * extra endpoint for periodic use enabling hub + keybd + mouse. That 77 * mostly works, except that with "usbnet" it's easy to trigger cases 78 * with "ping" where RX loses. (a) ping to davinci, even "ping -f", 79 * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses 80 * although ARP RX wins. (That test was done with a full speed link.) 81 */ 82 83 84 /* 85 * NOTE on endpoint usage: 86 * 87 * CONTROL transfers all go through ep0. BULK ones go through dedicated IN 88 * and OUT endpoints ... hardware is dedicated for those "async" queue(s). 89 * (Yes, bulk _could_ use more of the endpoints than that, and would even 90 * benefit from it.) 91 * 92 * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints. 93 * So far that scheduling is both dumb and optimistic: the endpoint will be 94 * "claimed" until its software queue is no longer refilled. No multiplexing 95 * of transfers between endpoints, or anything clever. 96 */ 97 98 struct musb *hcd_to_musb(struct usb_hcd *hcd) 99 { 100 return *(struct musb **) hcd->hcd_priv; 101 } 102 103 104 static void musb_ep_program(struct musb *musb, u8 epnum, 105 struct urb *urb, int is_out, 106 u8 *buf, u32 offset, u32 len); 107 108 /* 109 * Clear TX fifo. Needed to avoid BABBLE errors. 110 */ 111 static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) 112 { 113 struct musb *musb = ep->musb; 114 void __iomem *epio = ep->regs; 115 u16 csr; 116 int retries = 1000; 117 118 csr = musb_readw(epio, MUSB_TXCSR); 119 while (csr & MUSB_TXCSR_FIFONOTEMPTY) { 120 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; 121 musb_writew(epio, MUSB_TXCSR, csr); 122 csr = musb_readw(epio, MUSB_TXCSR); 123 124 /* 125 * FIXME: sometimes the tx fifo flush failed, it has been 126 * observed during device disconnect on AM335x. 127 * 128 * To reproduce the issue, ensure tx urb(s) are queued when 129 * unplug the usb device which is connected to AM335x usb 130 * host port. 131 * 132 * I found using a usb-ethernet device and running iperf 133 * (client on AM335x) has very high chance to trigger it. 134 * 135 * Better to turn on musb_dbg() in musb_cleanup_urb() with 136 * CPPI enabled to see the issue when aborting the tx channel. 137 */ 138 if (dev_WARN_ONCE(musb->controller, retries-- < 1, 139 "Could not flush host TX%d fifo: csr: %04x\n", 140 ep->epnum, csr)) 141 return; 142 } 143 } 144 145 static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) 146 { 147 void __iomem *epio = ep->regs; 148 u16 csr; 149 int retries = 5; 150 151 /* scrub any data left in the fifo */ 152 do { 153 csr = musb_readw(epio, MUSB_TXCSR); 154 if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) 155 break; 156 musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); 157 csr = musb_readw(epio, MUSB_TXCSR); 158 udelay(10); 159 } while (--retries); 160 161 WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", 162 ep->epnum, csr); 163 164 /* and reset for the next transfer */ 165 musb_writew(epio, MUSB_TXCSR, 0); 166 } 167 168 /* 169 * Start transmit. Caller is responsible for locking shared resources. 170 * musb must be locked. 171 */ 172 static inline void musb_h_tx_start(struct musb_hw_ep *ep) 173 { 174 u16 txcsr; 175 176 /* NOTE: no locks here; caller should lock and select EP */ 177 if (ep->epnum) { 178 txcsr = musb_readw(ep->regs, MUSB_TXCSR); 179 txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; 180 musb_writew(ep->regs, MUSB_TXCSR, txcsr); 181 } else { 182 txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; 183 musb_writew(ep->regs, MUSB_CSR0, txcsr); 184 } 185 186 } 187 188 static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) 189 { 190 u16 txcsr; 191 192 /* NOTE: no locks here; caller should lock and select EP */ 193 txcsr = musb_readw(ep->regs, MUSB_TXCSR); 194 txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; 195 if (is_cppi_enabled(ep->musb)) 196 txcsr |= MUSB_TXCSR_DMAMODE; 197 musb_writew(ep->regs, MUSB_TXCSR, txcsr); 198 } 199 200 static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) 201 { 202 if (is_in != 0 || ep->is_shared_fifo) 203 ep->in_qh = qh; 204 if (is_in == 0 || ep->is_shared_fifo) 205 ep->out_qh = qh; 206 } 207 208 static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) 209 { 210 return is_in ? ep->in_qh : ep->out_qh; 211 } 212 213 /* 214 * Start the URB at the front of an endpoint's queue 215 * end must be claimed from the caller. 216 * 217 * Context: controller locked, irqs blocked 218 */ 219 static void 220 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) 221 { 222 u16 frame; 223 u32 len; 224 void __iomem *mbase = musb->mregs; 225 struct urb *urb = next_urb(qh); 226 void *buf = urb->transfer_buffer; 227 u32 offset = 0; 228 struct musb_hw_ep *hw_ep = qh->hw_ep; 229 int epnum = hw_ep->epnum; 230 231 /* initialize software qh state */ 232 qh->offset = 0; 233 qh->segsize = 0; 234 235 /* gather right source of data */ 236 switch (qh->type) { 237 case USB_ENDPOINT_XFER_CONTROL: 238 /* control transfers always start with SETUP */ 239 is_in = 0; 240 musb->ep0_stage = MUSB_EP0_START; 241 buf = urb->setup_packet; 242 len = 8; 243 break; 244 case USB_ENDPOINT_XFER_ISOC: 245 qh->iso_idx = 0; 246 qh->frame = 0; 247 offset = urb->iso_frame_desc[0].offset; 248 len = urb->iso_frame_desc[0].length; 249 break; 250 default: /* bulk, interrupt */ 251 /* actual_length may be nonzero on retry paths */ 252 buf = urb->transfer_buffer + urb->actual_length; 253 len = urb->transfer_buffer_length - urb->actual_length; 254 } 255 256 trace_musb_urb_start(musb, urb); 257 258 /* Configure endpoint */ 259 musb_ep_set_qh(hw_ep, is_in, qh); 260 musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); 261 262 /* transmit may have more work: start it when it is time */ 263 if (is_in) 264 return; 265 266 /* determine if the time is right for a periodic transfer */ 267 switch (qh->type) { 268 case USB_ENDPOINT_XFER_ISOC: 269 case USB_ENDPOINT_XFER_INT: 270 musb_dbg(musb, "check whether there's still time for periodic Tx"); 271 frame = musb_readw(mbase, MUSB_FRAME); 272 /* FIXME this doesn't implement that scheduling policy ... 273 * or handle framecounter wrapping 274 */ 275 if (1) { /* Always assume URB_ISO_ASAP */ 276 /* REVISIT the SOF irq handler shouldn't duplicate 277 * this code; and we don't init urb->start_frame... 278 */ 279 qh->frame = 0; 280 goto start; 281 } else { 282 qh->frame = urb->start_frame; 283 /* enable SOF interrupt so we can count down */ 284 musb_dbg(musb, "SOF for %d", epnum); 285 #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ 286 musb_writeb(mbase, MUSB_INTRUSBE, 0xff); 287 #endif 288 } 289 break; 290 default: 291 start: 292 musb_dbg(musb, "Start TX%d %s", epnum, 293 hw_ep->tx_channel ? "dma" : "pio"); 294 295 if (!hw_ep->tx_channel) 296 musb_h_tx_start(hw_ep); 297 else if (is_cppi_enabled(musb) || tusb_dma_omap(musb)) 298 musb_h_tx_dma_start(hw_ep); 299 } 300 } 301 302 /* Context: caller owns controller lock, IRQs are blocked */ 303 static void musb_giveback(struct musb *musb, struct urb *urb, int status) 304 __releases(musb->lock) 305 __acquires(musb->lock) 306 { 307 trace_musb_urb_gb(musb, urb); 308 309 usb_hcd_unlink_urb_from_ep(musb->hcd, urb); 310 spin_unlock(&musb->lock); 311 usb_hcd_giveback_urb(musb->hcd, urb, status); 312 spin_lock(&musb->lock); 313 } 314 315 /* For bulk/interrupt endpoints only */ 316 static inline void musb_save_toggle(struct musb_qh *qh, int is_in, 317 struct urb *urb) 318 { 319 void __iomem *epio = qh->hw_ep->regs; 320 u16 csr; 321 322 /* 323 * FIXME: the current Mentor DMA code seems to have 324 * problems getting toggle correct. 325 */ 326 327 if (is_in) 328 csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; 329 else 330 csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; 331 332 usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); 333 } 334 335 /* 336 * Advance this hardware endpoint's queue, completing the specified URB and 337 * advancing to either the next URB queued to that qh, or else invalidating 338 * that qh and advancing to the next qh scheduled after the current one. 339 * 340 * Context: caller owns controller lock, IRQs are blocked 341 */ 342 static void musb_advance_schedule(struct musb *musb, struct urb *urb, 343 struct musb_hw_ep *hw_ep, int is_in) 344 { 345 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); 346 struct musb_hw_ep *ep = qh->hw_ep; 347 int ready = qh->is_ready; 348 int status; 349 350 status = (urb->status == -EINPROGRESS) ? 0 : urb->status; 351 352 /* save toggle eagerly, for paranoia */ 353 switch (qh->type) { 354 case USB_ENDPOINT_XFER_BULK: 355 case USB_ENDPOINT_XFER_INT: 356 musb_save_toggle(qh, is_in, urb); 357 break; 358 case USB_ENDPOINT_XFER_ISOC: 359 if (status == 0 && urb->error_count) 360 status = -EXDEV; 361 break; 362 } 363 364 qh->is_ready = 0; 365 musb_giveback(musb, urb, status); 366 qh->is_ready = ready; 367 368 /* reclaim resources (and bandwidth) ASAP; deschedule it, and 369 * invalidate qh as soon as list_empty(&hep->urb_list) 370 */ 371 if (list_empty(&qh->hep->urb_list)) { 372 struct list_head *head; 373 struct dma_controller *dma = musb->dma_controller; 374 375 if (is_in) { 376 ep->rx_reinit = 1; 377 if (ep->rx_channel) { 378 dma->channel_release(ep->rx_channel); 379 ep->rx_channel = NULL; 380 } 381 } else { 382 ep->tx_reinit = 1; 383 if (ep->tx_channel) { 384 dma->channel_release(ep->tx_channel); 385 ep->tx_channel = NULL; 386 } 387 } 388 389 /* Clobber old pointers to this qh */ 390 musb_ep_set_qh(ep, is_in, NULL); 391 qh->hep->hcpriv = NULL; 392 393 switch (qh->type) { 394 395 case USB_ENDPOINT_XFER_CONTROL: 396 case USB_ENDPOINT_XFER_BULK: 397 /* fifo policy for these lists, except that NAKing 398 * should rotate a qh to the end (for fairness). 399 */ 400 if (qh->mux == 1) { 401 head = qh->ring.prev; 402 list_del(&qh->ring); 403 kfree(qh); 404 qh = first_qh(head); 405 break; 406 } 407 408 case USB_ENDPOINT_XFER_ISOC: 409 case USB_ENDPOINT_XFER_INT: 410 /* this is where periodic bandwidth should be 411 * de-allocated if it's tracked and allocated; 412 * and where we'd update the schedule tree... 413 */ 414 kfree(qh); 415 qh = NULL; 416 break; 417 } 418 } 419 420 /* 421 * The pipe must be broken if current urb->status is set, so don't 422 * start next urb. 423 * TODO: to minimize the risk of regression, only check urb->status 424 * for RX, until we have a test case to understand the behavior of TX. 425 */ 426 if ((!status || !is_in) && qh && qh->is_ready) { 427 musb_dbg(musb, "... next ep%d %cX urb %p", 428 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); 429 musb_start_urb(musb, is_in, qh); 430 } 431 } 432 433 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) 434 { 435 /* we don't want fifo to fill itself again; 436 * ignore dma (various models), 437 * leave toggle alone (may not have been saved yet) 438 */ 439 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; 440 csr &= ~(MUSB_RXCSR_H_REQPKT 441 | MUSB_RXCSR_H_AUTOREQ 442 | MUSB_RXCSR_AUTOCLEAR); 443 444 /* write 2x to allow double buffering */ 445 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 446 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 447 448 /* flush writebuffer */ 449 return musb_readw(hw_ep->regs, MUSB_RXCSR); 450 } 451 452 /* 453 * PIO RX for a packet (or part of it). 454 */ 455 static bool 456 musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) 457 { 458 u16 rx_count; 459 u8 *buf; 460 u16 csr; 461 bool done = false; 462 u32 length; 463 int do_flush = 0; 464 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 465 void __iomem *epio = hw_ep->regs; 466 struct musb_qh *qh = hw_ep->in_qh; 467 int pipe = urb->pipe; 468 void *buffer = urb->transfer_buffer; 469 470 /* musb_ep_select(mbase, epnum); */ 471 rx_count = musb_readw(epio, MUSB_RXCOUNT); 472 musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count, 473 urb->transfer_buffer, qh->offset, 474 urb->transfer_buffer_length); 475 476 /* unload FIFO */ 477 if (usb_pipeisoc(pipe)) { 478 int status = 0; 479 struct usb_iso_packet_descriptor *d; 480 481 if (iso_err) { 482 status = -EILSEQ; 483 urb->error_count++; 484 } 485 486 d = urb->iso_frame_desc + qh->iso_idx; 487 buf = buffer + d->offset; 488 length = d->length; 489 if (rx_count > length) { 490 if (status == 0) { 491 status = -EOVERFLOW; 492 urb->error_count++; 493 } 494 musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length); 495 do_flush = 1; 496 } else 497 length = rx_count; 498 urb->actual_length += length; 499 d->actual_length = length; 500 501 d->status = status; 502 503 /* see if we are done */ 504 done = (++qh->iso_idx >= urb->number_of_packets); 505 } else { 506 /* non-isoch */ 507 buf = buffer + qh->offset; 508 length = urb->transfer_buffer_length - qh->offset; 509 if (rx_count > length) { 510 if (urb->status == -EINPROGRESS) 511 urb->status = -EOVERFLOW; 512 musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length); 513 do_flush = 1; 514 } else 515 length = rx_count; 516 urb->actual_length += length; 517 qh->offset += length; 518 519 /* see if we are done */ 520 done = (urb->actual_length == urb->transfer_buffer_length) 521 || (rx_count < qh->maxpacket) 522 || (urb->status != -EINPROGRESS); 523 if (done 524 && (urb->status == -EINPROGRESS) 525 && (urb->transfer_flags & URB_SHORT_NOT_OK) 526 && (urb->actual_length 527 < urb->transfer_buffer_length)) 528 urb->status = -EREMOTEIO; 529 } 530 531 musb_read_fifo(hw_ep, length, buf); 532 533 csr = musb_readw(epio, MUSB_RXCSR); 534 csr |= MUSB_RXCSR_H_WZC_BITS; 535 if (unlikely(do_flush)) 536 musb_h_flush_rxfifo(hw_ep, csr); 537 else { 538 /* REVISIT this assumes AUTOCLEAR is never set */ 539 csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); 540 if (!done) 541 csr |= MUSB_RXCSR_H_REQPKT; 542 musb_writew(epio, MUSB_RXCSR, csr); 543 } 544 545 return done; 546 } 547 548 /* we don't always need to reinit a given side of an endpoint... 549 * when we do, use tx/rx reinit routine and then construct a new CSR 550 * to address data toggle, NYET, and DMA or PIO. 551 * 552 * it's possible that driver bugs (especially for DMA) or aborting a 553 * transfer might have left the endpoint busier than it should be. 554 * the busy/not-empty tests are basically paranoia. 555 */ 556 static void 557 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum) 558 { 559 struct musb_hw_ep *ep = musb->endpoints + epnum; 560 u16 csr; 561 562 /* NOTE: we know the "rx" fifo reinit never triggers for ep0. 563 * That always uses tx_reinit since ep0 repurposes TX register 564 * offsets; the initial SETUP packet is also a kind of OUT. 565 */ 566 567 /* if programmed for Tx, put it in RX mode */ 568 if (ep->is_shared_fifo) { 569 csr = musb_readw(ep->regs, MUSB_TXCSR); 570 if (csr & MUSB_TXCSR_MODE) { 571 musb_h_tx_flush_fifo(ep); 572 csr = musb_readw(ep->regs, MUSB_TXCSR); 573 musb_writew(ep->regs, MUSB_TXCSR, 574 csr | MUSB_TXCSR_FRCDATATOG); 575 } 576 577 /* 578 * Clear the MODE bit (and everything else) to enable Rx. 579 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB. 580 */ 581 if (csr & MUSB_TXCSR_DMAMODE) 582 musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); 583 musb_writew(ep->regs, MUSB_TXCSR, 0); 584 585 /* scrub all previous state, clearing toggle */ 586 } 587 csr = musb_readw(ep->regs, MUSB_RXCSR); 588 if (csr & MUSB_RXCSR_RXPKTRDY) 589 WARNING("rx%d, packet/%d ready?\n", ep->epnum, 590 musb_readw(ep->regs, MUSB_RXCOUNT)); 591 592 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); 593 594 /* target addr and (for multipoint) hub addr/port */ 595 if (musb->is_multipoint) { 596 musb_write_rxfunaddr(musb, epnum, qh->addr_reg); 597 musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg); 598 musb_write_rxhubport(musb, epnum, qh->h_port_reg); 599 } else 600 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); 601 602 /* protocol/endpoint, interval/NAKlimit, i/o size */ 603 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); 604 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); 605 /* NOTE: bulk combining rewrites high bits of maxpacket */ 606 /* Set RXMAXP with the FIFO size of the endpoint 607 * to disable double buffer mode. 608 */ 609 if (musb->double_buffer_not_ok) 610 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); 611 else 612 musb_writew(ep->regs, MUSB_RXMAXP, 613 qh->maxpacket | ((qh->hb_mult - 1) << 11)); 614 615 ep->rx_reinit = 0; 616 } 617 618 static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma, 619 struct musb_hw_ep *hw_ep, struct musb_qh *qh, 620 struct urb *urb, u32 offset, 621 u32 *length, u8 *mode) 622 { 623 struct dma_channel *channel = hw_ep->tx_channel; 624 void __iomem *epio = hw_ep->regs; 625 u16 pkt_size = qh->maxpacket; 626 u16 csr; 627 628 if (*length > channel->max_len) 629 *length = channel->max_len; 630 631 csr = musb_readw(epio, MUSB_TXCSR); 632 if (*length > pkt_size) { 633 *mode = 1; 634 csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB; 635 /* autoset shouldn't be set in high bandwidth */ 636 /* 637 * Enable Autoset according to table 638 * below 639 * bulk_split hb_mult Autoset_Enable 640 * 0 1 Yes(Normal) 641 * 0 >1 No(High BW ISO) 642 * 1 1 Yes(HS bulk) 643 * 1 >1 Yes(FS bulk) 644 */ 645 if (qh->hb_mult == 1 || (qh->hb_mult > 1 && 646 can_bulk_split(hw_ep->musb, qh->type))) 647 csr |= MUSB_TXCSR_AUTOSET; 648 } else { 649 *mode = 0; 650 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 651 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ 652 } 653 channel->desired_mode = *mode; 654 musb_writew(epio, MUSB_TXCSR, csr); 655 } 656 657 static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, 658 struct musb_hw_ep *hw_ep, 659 struct musb_qh *qh, 660 struct urb *urb, 661 u32 offset, 662 u32 *length, 663 u8 *mode) 664 { 665 struct dma_channel *channel = hw_ep->tx_channel; 666 667 channel->actual_len = 0; 668 669 /* 670 * TX uses "RNDIS" mode automatically but needs help 671 * to identify the zero-length-final-packet case. 672 */ 673 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; 674 } 675 676 static bool musb_tx_dma_program(struct dma_controller *dma, 677 struct musb_hw_ep *hw_ep, struct musb_qh *qh, 678 struct urb *urb, u32 offset, u32 length) 679 { 680 struct dma_channel *channel = hw_ep->tx_channel; 681 u16 pkt_size = qh->maxpacket; 682 u8 mode; 683 684 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) 685 musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset, 686 &length, &mode); 687 else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb)) 688 musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset, 689 &length, &mode); 690 else 691 return false; 692 693 qh->segsize = length; 694 695 /* 696 * Ensure the data reaches to main memory before starting 697 * DMA transfer 698 */ 699 wmb(); 700 701 if (!dma->channel_program(channel, pkt_size, mode, 702 urb->transfer_dma + offset, length)) { 703 void __iomem *epio = hw_ep->regs; 704 u16 csr; 705 706 dma->channel_release(channel); 707 hw_ep->tx_channel = NULL; 708 709 csr = musb_readw(epio, MUSB_TXCSR); 710 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); 711 musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); 712 return false; 713 } 714 return true; 715 } 716 717 /* 718 * Program an HDRC endpoint as per the given URB 719 * Context: irqs blocked, controller lock held 720 */ 721 static void musb_ep_program(struct musb *musb, u8 epnum, 722 struct urb *urb, int is_out, 723 u8 *buf, u32 offset, u32 len) 724 { 725 struct dma_controller *dma_controller; 726 struct dma_channel *dma_channel; 727 u8 dma_ok; 728 void __iomem *mbase = musb->mregs; 729 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 730 void __iomem *epio = hw_ep->regs; 731 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); 732 u16 packet_sz = qh->maxpacket; 733 u8 use_dma = 1; 734 u16 csr; 735 736 musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s " 737 "h_addr%02x h_port%02x bytes %d", 738 is_out ? "-->" : "<--", 739 epnum, urb, urb->dev->speed, 740 qh->addr_reg, qh->epnum, is_out ? "out" : "in", 741 qh->h_addr_reg, qh->h_port_reg, 742 len); 743 744 musb_ep_select(mbase, epnum); 745 746 if (is_out && !len) { 747 use_dma = 0; 748 csr = musb_readw(epio, MUSB_TXCSR); 749 csr &= ~MUSB_TXCSR_DMAENAB; 750 musb_writew(epio, MUSB_TXCSR, csr); 751 hw_ep->tx_channel = NULL; 752 } 753 754 /* candidate for DMA? */ 755 dma_controller = musb->dma_controller; 756 if (use_dma && is_dma_capable() && epnum && dma_controller) { 757 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; 758 if (!dma_channel) { 759 dma_channel = dma_controller->channel_alloc( 760 dma_controller, hw_ep, is_out); 761 if (is_out) 762 hw_ep->tx_channel = dma_channel; 763 else 764 hw_ep->rx_channel = dma_channel; 765 } 766 } else 767 dma_channel = NULL; 768 769 /* make sure we clear DMAEnab, autoSet bits from previous run */ 770 771 /* OUT/transmit/EP0 or IN/receive? */ 772 if (is_out) { 773 u16 csr; 774 u16 int_txe; 775 u16 load_count; 776 777 csr = musb_readw(epio, MUSB_TXCSR); 778 779 /* disable interrupt in case we flush */ 780 int_txe = musb->intrtxe; 781 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); 782 783 /* general endpoint setup */ 784 if (epnum) { 785 /* flush all old state, set default */ 786 /* 787 * We could be flushing valid 788 * packets in double buffering 789 * case 790 */ 791 if (!hw_ep->tx_double_buffered) 792 musb_h_tx_flush_fifo(hw_ep); 793 794 /* 795 * We must not clear the DMAMODE bit before or in 796 * the same cycle with the DMAENAB bit, so we clear 797 * the latter first... 798 */ 799 csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT 800 | MUSB_TXCSR_AUTOSET 801 | MUSB_TXCSR_DMAENAB 802 | MUSB_TXCSR_FRCDATATOG 803 | MUSB_TXCSR_H_RXSTALL 804 | MUSB_TXCSR_H_ERROR 805 | MUSB_TXCSR_TXPKTRDY 806 ); 807 csr |= MUSB_TXCSR_MODE; 808 809 if (!hw_ep->tx_double_buffered) { 810 if (usb_gettoggle(urb->dev, qh->epnum, 1)) 811 csr |= MUSB_TXCSR_H_WR_DATATOGGLE 812 | MUSB_TXCSR_H_DATATOGGLE; 813 else 814 csr |= MUSB_TXCSR_CLRDATATOG; 815 } 816 817 musb_writew(epio, MUSB_TXCSR, csr); 818 /* REVISIT may need to clear FLUSHFIFO ... */ 819 csr &= ~MUSB_TXCSR_DMAMODE; 820 musb_writew(epio, MUSB_TXCSR, csr); 821 csr = musb_readw(epio, MUSB_TXCSR); 822 } else { 823 /* endpoint 0: just flush */ 824 musb_h_ep0_flush_fifo(hw_ep); 825 } 826 827 /* target addr and (for multipoint) hub addr/port */ 828 if (musb->is_multipoint) { 829 musb_write_txfunaddr(musb, epnum, qh->addr_reg); 830 musb_write_txhubaddr(musb, epnum, qh->h_addr_reg); 831 musb_write_txhubport(musb, epnum, qh->h_port_reg); 832 /* FIXME if !epnum, do the same for RX ... */ 833 } else 834 musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); 835 836 /* protocol/endpoint/interval/NAKlimit */ 837 if (epnum) { 838 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); 839 if (musb->double_buffer_not_ok) { 840 musb_writew(epio, MUSB_TXMAXP, 841 hw_ep->max_packet_sz_tx); 842 } else if (can_bulk_split(musb, qh->type)) { 843 qh->hb_mult = hw_ep->max_packet_sz_tx 844 / packet_sz; 845 musb_writew(epio, MUSB_TXMAXP, packet_sz 846 | ((qh->hb_mult) - 1) << 11); 847 } else { 848 musb_writew(epio, MUSB_TXMAXP, 849 qh->maxpacket | 850 ((qh->hb_mult - 1) << 11)); 851 } 852 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); 853 } else { 854 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); 855 if (musb->is_multipoint) 856 musb_writeb(epio, MUSB_TYPE0, 857 qh->type_reg); 858 } 859 860 if (can_bulk_split(musb, qh->type)) 861 load_count = min((u32) hw_ep->max_packet_sz_tx, 862 len); 863 else 864 load_count = min((u32) packet_sz, len); 865 866 if (dma_channel && musb_tx_dma_program(dma_controller, 867 hw_ep, qh, urb, offset, len)) 868 load_count = 0; 869 870 if (load_count) { 871 /* PIO to load FIFO */ 872 qh->segsize = load_count; 873 if (!buf) { 874 sg_miter_start(&qh->sg_miter, urb->sg, 1, 875 SG_MITER_ATOMIC 876 | SG_MITER_FROM_SG); 877 if (!sg_miter_next(&qh->sg_miter)) { 878 dev_err(musb->controller, 879 "error: sg" 880 "list empty\n"); 881 sg_miter_stop(&qh->sg_miter); 882 goto finish; 883 } 884 buf = qh->sg_miter.addr + urb->sg->offset + 885 urb->actual_length; 886 load_count = min_t(u32, load_count, 887 qh->sg_miter.length); 888 musb_write_fifo(hw_ep, load_count, buf); 889 qh->sg_miter.consumed = load_count; 890 sg_miter_stop(&qh->sg_miter); 891 } else 892 musb_write_fifo(hw_ep, load_count, buf); 893 } 894 finish: 895 /* re-enable interrupt */ 896 musb_writew(mbase, MUSB_INTRTXE, int_txe); 897 898 /* IN/receive */ 899 } else { 900 u16 csr; 901 902 if (hw_ep->rx_reinit) { 903 musb_rx_reinit(musb, qh, epnum); 904 905 /* init new state: toggle and NYET, maybe DMA later */ 906 if (usb_gettoggle(urb->dev, qh->epnum, 0)) 907 csr = MUSB_RXCSR_H_WR_DATATOGGLE 908 | MUSB_RXCSR_H_DATATOGGLE; 909 else 910 csr = 0; 911 if (qh->type == USB_ENDPOINT_XFER_INT) 912 csr |= MUSB_RXCSR_DISNYET; 913 914 } else { 915 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 916 917 if (csr & (MUSB_RXCSR_RXPKTRDY 918 | MUSB_RXCSR_DMAENAB 919 | MUSB_RXCSR_H_REQPKT)) 920 ERR("broken !rx_reinit, ep%d csr %04x\n", 921 hw_ep->epnum, csr); 922 923 /* scrub any stale state, leaving toggle alone */ 924 csr &= MUSB_RXCSR_DISNYET; 925 } 926 927 /* kick things off */ 928 929 if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) { 930 /* Candidate for DMA */ 931 dma_channel->actual_len = 0L; 932 qh->segsize = len; 933 934 /* AUTOREQ is in a DMA register */ 935 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 936 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 937 938 /* 939 * Unless caller treats short RX transfers as 940 * errors, we dare not queue multiple transfers. 941 */ 942 dma_ok = dma_controller->channel_program(dma_channel, 943 packet_sz, !(urb->transfer_flags & 944 URB_SHORT_NOT_OK), 945 urb->transfer_dma + offset, 946 qh->segsize); 947 if (!dma_ok) { 948 dma_controller->channel_release(dma_channel); 949 hw_ep->rx_channel = dma_channel = NULL; 950 } else 951 csr |= MUSB_RXCSR_DMAENAB; 952 } 953 954 csr |= MUSB_RXCSR_H_REQPKT; 955 musb_dbg(musb, "RXCSR%d := %04x", epnum, csr); 956 musb_writew(hw_ep->regs, MUSB_RXCSR, csr); 957 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 958 } 959 } 960 961 /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to 962 * the end; avoids starvation for other endpoints. 963 */ 964 static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, 965 int is_in) 966 { 967 struct dma_channel *dma; 968 struct urb *urb; 969 void __iomem *mbase = musb->mregs; 970 void __iomem *epio = ep->regs; 971 struct musb_qh *cur_qh, *next_qh; 972 u16 rx_csr, tx_csr; 973 974 musb_ep_select(mbase, ep->epnum); 975 if (is_in) { 976 dma = is_dma_capable() ? ep->rx_channel : NULL; 977 978 /* 979 * Need to stop the transaction by clearing REQPKT first 980 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED 981 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2 982 */ 983 rx_csr = musb_readw(epio, MUSB_RXCSR); 984 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 985 rx_csr &= ~MUSB_RXCSR_H_REQPKT; 986 musb_writew(epio, MUSB_RXCSR, rx_csr); 987 rx_csr &= ~MUSB_RXCSR_DATAERROR; 988 musb_writew(epio, MUSB_RXCSR, rx_csr); 989 990 cur_qh = first_qh(&musb->in_bulk); 991 } else { 992 dma = is_dma_capable() ? ep->tx_channel : NULL; 993 994 /* clear nak timeout bit */ 995 tx_csr = musb_readw(epio, MUSB_TXCSR); 996 tx_csr |= MUSB_TXCSR_H_WZC_BITS; 997 tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT; 998 musb_writew(epio, MUSB_TXCSR, tx_csr); 999 1000 cur_qh = first_qh(&musb->out_bulk); 1001 } 1002 if (cur_qh) { 1003 urb = next_urb(cur_qh); 1004 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1005 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1006 musb->dma_controller->channel_abort(dma); 1007 urb->actual_length += dma->actual_len; 1008 dma->actual_len = 0L; 1009 } 1010 musb_save_toggle(cur_qh, is_in, urb); 1011 1012 if (is_in) { 1013 /* move cur_qh to end of queue */ 1014 list_move_tail(&cur_qh->ring, &musb->in_bulk); 1015 1016 /* get the next qh from musb->in_bulk */ 1017 next_qh = first_qh(&musb->in_bulk); 1018 1019 /* set rx_reinit and schedule the next qh */ 1020 ep->rx_reinit = 1; 1021 } else { 1022 /* move cur_qh to end of queue */ 1023 list_move_tail(&cur_qh->ring, &musb->out_bulk); 1024 1025 /* get the next qh from musb->out_bulk */ 1026 next_qh = first_qh(&musb->out_bulk); 1027 1028 /* set tx_reinit and schedule the next qh */ 1029 ep->tx_reinit = 1; 1030 } 1031 musb_start_urb(musb, is_in, next_qh); 1032 } 1033 } 1034 1035 /* 1036 * Service the default endpoint (ep0) as host. 1037 * Return true until it's time to start the status stage. 1038 */ 1039 static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) 1040 { 1041 bool more = false; 1042 u8 *fifo_dest = NULL; 1043 u16 fifo_count = 0; 1044 struct musb_hw_ep *hw_ep = musb->control_ep; 1045 struct musb_qh *qh = hw_ep->in_qh; 1046 struct usb_ctrlrequest *request; 1047 1048 switch (musb->ep0_stage) { 1049 case MUSB_EP0_IN: 1050 fifo_dest = urb->transfer_buffer + urb->actual_length; 1051 fifo_count = min_t(size_t, len, urb->transfer_buffer_length - 1052 urb->actual_length); 1053 if (fifo_count < len) 1054 urb->status = -EOVERFLOW; 1055 1056 musb_read_fifo(hw_ep, fifo_count, fifo_dest); 1057 1058 urb->actual_length += fifo_count; 1059 if (len < qh->maxpacket) { 1060 /* always terminate on short read; it's 1061 * rarely reported as an error. 1062 */ 1063 } else if (urb->actual_length < 1064 urb->transfer_buffer_length) 1065 more = true; 1066 break; 1067 case MUSB_EP0_START: 1068 request = (struct usb_ctrlrequest *) urb->setup_packet; 1069 1070 if (!request->wLength) { 1071 musb_dbg(musb, "start no-DATA"); 1072 break; 1073 } else if (request->bRequestType & USB_DIR_IN) { 1074 musb_dbg(musb, "start IN-DATA"); 1075 musb->ep0_stage = MUSB_EP0_IN; 1076 more = true; 1077 break; 1078 } else { 1079 musb_dbg(musb, "start OUT-DATA"); 1080 musb->ep0_stage = MUSB_EP0_OUT; 1081 more = true; 1082 } 1083 /* FALLTHROUGH */ 1084 case MUSB_EP0_OUT: 1085 fifo_count = min_t(size_t, qh->maxpacket, 1086 urb->transfer_buffer_length - 1087 urb->actual_length); 1088 if (fifo_count) { 1089 fifo_dest = (u8 *) (urb->transfer_buffer 1090 + urb->actual_length); 1091 musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p", 1092 fifo_count, 1093 (fifo_count == 1) ? "" : "s", 1094 fifo_dest); 1095 musb_write_fifo(hw_ep, fifo_count, fifo_dest); 1096 1097 urb->actual_length += fifo_count; 1098 more = true; 1099 } 1100 break; 1101 default: 1102 ERR("bogus ep0 stage %d\n", musb->ep0_stage); 1103 break; 1104 } 1105 1106 return more; 1107 } 1108 1109 /* 1110 * Handle default endpoint interrupt as host. Only called in IRQ time 1111 * from musb_interrupt(). 1112 * 1113 * called with controller irqlocked 1114 */ 1115 irqreturn_t musb_h_ep0_irq(struct musb *musb) 1116 { 1117 struct urb *urb; 1118 u16 csr, len; 1119 int status = 0; 1120 void __iomem *mbase = musb->mregs; 1121 struct musb_hw_ep *hw_ep = musb->control_ep; 1122 void __iomem *epio = hw_ep->regs; 1123 struct musb_qh *qh = hw_ep->in_qh; 1124 bool complete = false; 1125 irqreturn_t retval = IRQ_NONE; 1126 1127 /* ep0 only has one queue, "in" */ 1128 urb = next_urb(qh); 1129 1130 musb_ep_select(mbase, 0); 1131 csr = musb_readw(epio, MUSB_CSR0); 1132 len = (csr & MUSB_CSR0_RXPKTRDY) 1133 ? musb_readb(epio, MUSB_COUNT0) 1134 : 0; 1135 1136 musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d", 1137 csr, qh, len, urb, musb->ep0_stage); 1138 1139 /* if we just did status stage, we are done */ 1140 if (MUSB_EP0_STATUS == musb->ep0_stage) { 1141 retval = IRQ_HANDLED; 1142 complete = true; 1143 } 1144 1145 /* prepare status */ 1146 if (csr & MUSB_CSR0_H_RXSTALL) { 1147 musb_dbg(musb, "STALLING ENDPOINT"); 1148 status = -EPIPE; 1149 1150 } else if (csr & MUSB_CSR0_H_ERROR) { 1151 musb_dbg(musb, "no response, csr0 %04x", csr); 1152 status = -EPROTO; 1153 1154 } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { 1155 musb_dbg(musb, "control NAK timeout"); 1156 1157 /* NOTE: this code path would be a good place to PAUSE a 1158 * control transfer, if another one is queued, so that 1159 * ep0 is more likely to stay busy. That's already done 1160 * for bulk RX transfers. 1161 * 1162 * if (qh->ring.next != &musb->control), then 1163 * we have a candidate... NAKing is *NOT* an error 1164 */ 1165 musb_writew(epio, MUSB_CSR0, 0); 1166 retval = IRQ_HANDLED; 1167 } 1168 1169 if (status) { 1170 musb_dbg(musb, "aborting"); 1171 retval = IRQ_HANDLED; 1172 if (urb) 1173 urb->status = status; 1174 complete = true; 1175 1176 /* use the proper sequence to abort the transfer */ 1177 if (csr & MUSB_CSR0_H_REQPKT) { 1178 csr &= ~MUSB_CSR0_H_REQPKT; 1179 musb_writew(epio, MUSB_CSR0, csr); 1180 csr &= ~MUSB_CSR0_H_NAKTIMEOUT; 1181 musb_writew(epio, MUSB_CSR0, csr); 1182 } else { 1183 musb_h_ep0_flush_fifo(hw_ep); 1184 } 1185 1186 musb_writeb(epio, MUSB_NAKLIMIT0, 0); 1187 1188 /* clear it */ 1189 musb_writew(epio, MUSB_CSR0, 0); 1190 } 1191 1192 if (unlikely(!urb)) { 1193 /* stop endpoint since we have no place for its data, this 1194 * SHOULD NEVER HAPPEN! */ 1195 ERR("no URB for end 0\n"); 1196 1197 musb_h_ep0_flush_fifo(hw_ep); 1198 goto done; 1199 } 1200 1201 if (!complete) { 1202 /* call common logic and prepare response */ 1203 if (musb_h_ep0_continue(musb, len, urb)) { 1204 /* more packets required */ 1205 csr = (MUSB_EP0_IN == musb->ep0_stage) 1206 ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; 1207 } else { 1208 /* data transfer complete; perform status phase */ 1209 if (usb_pipeout(urb->pipe) 1210 || !urb->transfer_buffer_length) 1211 csr = MUSB_CSR0_H_STATUSPKT 1212 | MUSB_CSR0_H_REQPKT; 1213 else 1214 csr = MUSB_CSR0_H_STATUSPKT 1215 | MUSB_CSR0_TXPKTRDY; 1216 1217 /* disable ping token in status phase */ 1218 csr |= MUSB_CSR0_H_DIS_PING; 1219 1220 /* flag status stage */ 1221 musb->ep0_stage = MUSB_EP0_STATUS; 1222 1223 musb_dbg(musb, "ep0 STATUS, csr %04x", csr); 1224 1225 } 1226 musb_writew(epio, MUSB_CSR0, csr); 1227 retval = IRQ_HANDLED; 1228 } else 1229 musb->ep0_stage = MUSB_EP0_IDLE; 1230 1231 /* call completion handler if done */ 1232 if (complete) 1233 musb_advance_schedule(musb, urb, hw_ep, 1); 1234 done: 1235 return retval; 1236 } 1237 1238 1239 #ifdef CONFIG_USB_INVENTRA_DMA 1240 1241 /* Host side TX (OUT) using Mentor DMA works as follows: 1242 submit_urb -> 1243 - if queue was empty, Program Endpoint 1244 - ... which starts DMA to fifo in mode 1 or 0 1245 1246 DMA Isr (transfer complete) -> TxAvail() 1247 - Stop DMA (~DmaEnab) (<--- Alert ... currently happens 1248 only in musb_cleanup_urb) 1249 - TxPktRdy has to be set in mode 0 or for 1250 short packets in mode 1. 1251 */ 1252 1253 #endif 1254 1255 /* Service a Tx-Available or dma completion irq for the endpoint */ 1256 void musb_host_tx(struct musb *musb, u8 epnum) 1257 { 1258 int pipe; 1259 bool done = false; 1260 u16 tx_csr; 1261 size_t length = 0; 1262 size_t offset = 0; 1263 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1264 void __iomem *epio = hw_ep->regs; 1265 struct musb_qh *qh = hw_ep->out_qh; 1266 struct urb *urb = next_urb(qh); 1267 u32 status = 0; 1268 void __iomem *mbase = musb->mregs; 1269 struct dma_channel *dma; 1270 bool transfer_pending = false; 1271 1272 musb_ep_select(mbase, epnum); 1273 tx_csr = musb_readw(epio, MUSB_TXCSR); 1274 1275 /* with CPPI, DMA sometimes triggers "extra" irqs */ 1276 if (!urb) { 1277 musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr); 1278 return; 1279 } 1280 1281 pipe = urb->pipe; 1282 dma = is_dma_capable() ? hw_ep->tx_channel : NULL; 1283 trace_musb_urb_tx(musb, urb); 1284 musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr, 1285 dma ? ", dma" : ""); 1286 1287 /* check for errors */ 1288 if (tx_csr & MUSB_TXCSR_H_RXSTALL) { 1289 /* dma was disabled, fifo flushed */ 1290 musb_dbg(musb, "TX end %d stall", epnum); 1291 1292 /* stall; record URB status */ 1293 status = -EPIPE; 1294 1295 } else if (tx_csr & MUSB_TXCSR_H_ERROR) { 1296 /* (NON-ISO) dma was disabled, fifo flushed */ 1297 musb_dbg(musb, "TX 3strikes on ep=%d", epnum); 1298 1299 status = -ETIMEDOUT; 1300 1301 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { 1302 if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1 1303 && !list_is_singular(&musb->out_bulk)) { 1304 musb_dbg(musb, "NAK timeout on TX%d ep", epnum); 1305 musb_bulk_nak_timeout(musb, hw_ep, 0); 1306 } else { 1307 musb_dbg(musb, "TX ep%d device not responding", epnum); 1308 /* NOTE: this code path would be a good place to PAUSE a 1309 * transfer, if there's some other (nonperiodic) tx urb 1310 * that could use this fifo. (dma complicates it...) 1311 * That's already done for bulk RX transfers. 1312 * 1313 * if (bulk && qh->ring.next != &musb->out_bulk), then 1314 * we have a candidate... NAKing is *NOT* an error 1315 */ 1316 musb_ep_select(mbase, epnum); 1317 musb_writew(epio, MUSB_TXCSR, 1318 MUSB_TXCSR_H_WZC_BITS 1319 | MUSB_TXCSR_TXPKTRDY); 1320 } 1321 return; 1322 } 1323 1324 done: 1325 if (status) { 1326 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1327 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1328 musb->dma_controller->channel_abort(dma); 1329 } 1330 1331 /* do the proper sequence to abort the transfer in the 1332 * usb core; the dma engine should already be stopped. 1333 */ 1334 musb_h_tx_flush_fifo(hw_ep); 1335 tx_csr &= ~(MUSB_TXCSR_AUTOSET 1336 | MUSB_TXCSR_DMAENAB 1337 | MUSB_TXCSR_H_ERROR 1338 | MUSB_TXCSR_H_RXSTALL 1339 | MUSB_TXCSR_H_NAKTIMEOUT 1340 ); 1341 1342 musb_ep_select(mbase, epnum); 1343 musb_writew(epio, MUSB_TXCSR, tx_csr); 1344 /* REVISIT may need to clear FLUSHFIFO ... */ 1345 musb_writew(epio, MUSB_TXCSR, tx_csr); 1346 musb_writeb(epio, MUSB_TXINTERVAL, 0); 1347 1348 done = true; 1349 } 1350 1351 /* second cppi case */ 1352 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1353 musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr); 1354 return; 1355 } 1356 1357 if (is_dma_capable() && dma && !status) { 1358 /* 1359 * DMA has completed. But if we're using DMA mode 1 (multi 1360 * packet DMA), we need a terminal TXPKTRDY interrupt before 1361 * we can consider this transfer completed, lest we trash 1362 * its last packet when writing the next URB's data. So we 1363 * switch back to mode 0 to get that interrupt; we'll come 1364 * back here once it happens. 1365 */ 1366 if (tx_csr & MUSB_TXCSR_DMAMODE) { 1367 /* 1368 * We shouldn't clear DMAMODE with DMAENAB set; so 1369 * clear them in a safe order. That should be OK 1370 * once TXPKTRDY has been set (and I've never seen 1371 * it being 0 at this moment -- DMA interrupt latency 1372 * is significant) but if it hasn't been then we have 1373 * no choice but to stop being polite and ignore the 1374 * programmer's guide... :-) 1375 * 1376 * Note that we must write TXCSR with TXPKTRDY cleared 1377 * in order not to re-trigger the packet send (this bit 1378 * can't be cleared by CPU), and there's another caveat: 1379 * TXPKTRDY may be set shortly and then cleared in the 1380 * double-buffered FIFO mode, so we do an extra TXCSR 1381 * read for debouncing... 1382 */ 1383 tx_csr &= musb_readw(epio, MUSB_TXCSR); 1384 if (tx_csr & MUSB_TXCSR_TXPKTRDY) { 1385 tx_csr &= ~(MUSB_TXCSR_DMAENAB | 1386 MUSB_TXCSR_TXPKTRDY); 1387 musb_writew(epio, MUSB_TXCSR, 1388 tx_csr | MUSB_TXCSR_H_WZC_BITS); 1389 } 1390 tx_csr &= ~(MUSB_TXCSR_DMAMODE | 1391 MUSB_TXCSR_TXPKTRDY); 1392 musb_writew(epio, MUSB_TXCSR, 1393 tx_csr | MUSB_TXCSR_H_WZC_BITS); 1394 1395 /* 1396 * There is no guarantee that we'll get an interrupt 1397 * after clearing DMAMODE as we might have done this 1398 * too late (after TXPKTRDY was cleared by controller). 1399 * Re-read TXCSR as we have spoiled its previous value. 1400 */ 1401 tx_csr = musb_readw(epio, MUSB_TXCSR); 1402 } 1403 1404 /* 1405 * We may get here from a DMA completion or TXPKTRDY interrupt. 1406 * In any case, we must check the FIFO status here and bail out 1407 * only if the FIFO still has data -- that should prevent the 1408 * "missed" TXPKTRDY interrupts and deal with double-buffered 1409 * FIFO mode too... 1410 */ 1411 if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { 1412 musb_dbg(musb, 1413 "DMA complete but FIFO not empty, CSR %04x", 1414 tx_csr); 1415 return; 1416 } 1417 } 1418 1419 if (!status || dma || usb_pipeisoc(pipe)) { 1420 if (dma) 1421 length = dma->actual_len; 1422 else 1423 length = qh->segsize; 1424 qh->offset += length; 1425 1426 if (usb_pipeisoc(pipe)) { 1427 struct usb_iso_packet_descriptor *d; 1428 1429 d = urb->iso_frame_desc + qh->iso_idx; 1430 d->actual_length = length; 1431 d->status = status; 1432 if (++qh->iso_idx >= urb->number_of_packets) { 1433 done = true; 1434 } else { 1435 d++; 1436 offset = d->offset; 1437 length = d->length; 1438 } 1439 } else if (dma && urb->transfer_buffer_length == qh->offset) { 1440 done = true; 1441 } else { 1442 /* see if we need to send more data, or ZLP */ 1443 if (qh->segsize < qh->maxpacket) 1444 done = true; 1445 else if (qh->offset == urb->transfer_buffer_length 1446 && !(urb->transfer_flags 1447 & URB_ZERO_PACKET)) 1448 done = true; 1449 if (!done) { 1450 offset = qh->offset; 1451 length = urb->transfer_buffer_length - offset; 1452 transfer_pending = true; 1453 } 1454 } 1455 } 1456 1457 /* urb->status != -EINPROGRESS means request has been faulted, 1458 * so we must abort this transfer after cleanup 1459 */ 1460 if (urb->status != -EINPROGRESS) { 1461 done = true; 1462 if (status == 0) 1463 status = urb->status; 1464 } 1465 1466 if (done) { 1467 /* set status */ 1468 urb->status = status; 1469 urb->actual_length = qh->offset; 1470 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); 1471 return; 1472 } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) { 1473 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, 1474 offset, length)) { 1475 if (is_cppi_enabled(musb) || tusb_dma_omap(musb)) 1476 musb_h_tx_dma_start(hw_ep); 1477 return; 1478 } 1479 } else if (tx_csr & MUSB_TXCSR_DMAENAB) { 1480 musb_dbg(musb, "not complete, but DMA enabled?"); 1481 return; 1482 } 1483 1484 /* 1485 * PIO: start next packet in this URB. 1486 * 1487 * REVISIT: some docs say that when hw_ep->tx_double_buffered, 1488 * (and presumably, FIFO is not half-full) we should write *two* 1489 * packets before updating TXCSR; other docs disagree... 1490 */ 1491 if (length > qh->maxpacket) 1492 length = qh->maxpacket; 1493 /* Unmap the buffer so that CPU can use it */ 1494 usb_hcd_unmap_urb_for_dma(musb->hcd, urb); 1495 1496 /* 1497 * We need to map sg if the transfer_buffer is 1498 * NULL. 1499 */ 1500 if (!urb->transfer_buffer) 1501 qh->use_sg = true; 1502 1503 if (qh->use_sg) { 1504 /* sg_miter_start is already done in musb_ep_program */ 1505 if (!sg_miter_next(&qh->sg_miter)) { 1506 dev_err(musb->controller, "error: sg list empty\n"); 1507 sg_miter_stop(&qh->sg_miter); 1508 status = -EINVAL; 1509 goto done; 1510 } 1511 urb->transfer_buffer = qh->sg_miter.addr; 1512 length = min_t(u32, length, qh->sg_miter.length); 1513 musb_write_fifo(hw_ep, length, urb->transfer_buffer); 1514 qh->sg_miter.consumed = length; 1515 sg_miter_stop(&qh->sg_miter); 1516 } else { 1517 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); 1518 } 1519 1520 qh->segsize = length; 1521 1522 if (qh->use_sg) { 1523 if (offset + length >= urb->transfer_buffer_length) 1524 qh->use_sg = false; 1525 } 1526 1527 musb_ep_select(mbase, epnum); 1528 musb_writew(epio, MUSB_TXCSR, 1529 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); 1530 } 1531 1532 #ifdef CONFIG_USB_TI_CPPI41_DMA 1533 /* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */ 1534 static int musb_rx_dma_iso_cppi41(struct dma_controller *dma, 1535 struct musb_hw_ep *hw_ep, 1536 struct musb_qh *qh, 1537 struct urb *urb, 1538 size_t len) 1539 { 1540 struct dma_channel *channel = hw_ep->rx_channel; 1541 void __iomem *epio = hw_ep->regs; 1542 dma_addr_t *buf; 1543 u32 length; 1544 u16 val; 1545 1546 buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset + 1547 (u32)urb->transfer_dma; 1548 1549 length = urb->iso_frame_desc[qh->iso_idx].length; 1550 1551 val = musb_readw(epio, MUSB_RXCSR); 1552 val |= MUSB_RXCSR_DMAENAB; 1553 musb_writew(hw_ep->regs, MUSB_RXCSR, val); 1554 1555 return dma->channel_program(channel, qh->maxpacket, 0, 1556 (u32)buf, length); 1557 } 1558 #else 1559 static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma, 1560 struct musb_hw_ep *hw_ep, 1561 struct musb_qh *qh, 1562 struct urb *urb, 1563 size_t len) 1564 { 1565 return false; 1566 } 1567 #endif 1568 1569 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \ 1570 defined(CONFIG_USB_TI_CPPI41_DMA) 1571 /* Host side RX (IN) using Mentor DMA works as follows: 1572 submit_urb -> 1573 - if queue was empty, ProgramEndpoint 1574 - first IN token is sent out (by setting ReqPkt) 1575 LinuxIsr -> RxReady() 1576 /\ => first packet is received 1577 | - Set in mode 0 (DmaEnab, ~ReqPkt) 1578 | -> DMA Isr (transfer complete) -> RxReady() 1579 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) 1580 | - if urb not complete, send next IN token (ReqPkt) 1581 | | else complete urb. 1582 | | 1583 --------------------------- 1584 * 1585 * Nuances of mode 1: 1586 * For short packets, no ack (+RxPktRdy) is sent automatically 1587 * (even if AutoClear is ON) 1588 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent 1589 * automatically => major problem, as collecting the next packet becomes 1590 * difficult. Hence mode 1 is not used. 1591 * 1592 * REVISIT 1593 * All we care about at this driver level is that 1594 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; 1595 * (b) termination conditions are: short RX, or buffer full; 1596 * (c) fault modes include 1597 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. 1598 * (and that endpoint's dma queue stops immediately) 1599 * - overflow (full, PLUS more bytes in the terminal packet) 1600 * 1601 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would 1602 * thus be a great candidate for using mode 1 ... for all but the 1603 * last packet of one URB's transfer. 1604 */ 1605 static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma, 1606 struct musb_hw_ep *hw_ep, 1607 struct musb_qh *qh, 1608 struct urb *urb, 1609 size_t len) 1610 { 1611 struct dma_channel *channel = hw_ep->rx_channel; 1612 void __iomem *epio = hw_ep->regs; 1613 u16 val; 1614 int pipe; 1615 bool done; 1616 1617 pipe = urb->pipe; 1618 1619 if (usb_pipeisoc(pipe)) { 1620 struct usb_iso_packet_descriptor *d; 1621 1622 d = urb->iso_frame_desc + qh->iso_idx; 1623 d->actual_length = len; 1624 1625 /* even if there was an error, we did the dma 1626 * for iso_frame_desc->length 1627 */ 1628 if (d->status != -EILSEQ && d->status != -EOVERFLOW) 1629 d->status = 0; 1630 1631 if (++qh->iso_idx >= urb->number_of_packets) { 1632 done = true; 1633 } else { 1634 /* REVISIT: Why ignore return value here? */ 1635 if (musb_dma_cppi41(hw_ep->musb)) 1636 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh, 1637 urb, len); 1638 done = false; 1639 } 1640 1641 } else { 1642 /* done if urb buffer is full or short packet is recd */ 1643 done = (urb->actual_length + len >= 1644 urb->transfer_buffer_length 1645 || channel->actual_len < qh->maxpacket 1646 || channel->rx_packet_done); 1647 } 1648 1649 /* send IN token for next packet, without AUTOREQ */ 1650 if (!done) { 1651 val = musb_readw(epio, MUSB_RXCSR); 1652 val |= MUSB_RXCSR_H_REQPKT; 1653 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val); 1654 } 1655 1656 return done; 1657 } 1658 1659 /* Disadvantage of using mode 1: 1660 * It's basically usable only for mass storage class; essentially all 1661 * other protocols also terminate transfers on short packets. 1662 * 1663 * Details: 1664 * An extra IN token is sent at the end of the transfer (due to AUTOREQ) 1665 * If you try to use mode 1 for (transfer_buffer_length - 512), and try 1666 * to use the extra IN token to grab the last packet using mode 0, then 1667 * the problem is that you cannot be sure when the device will send the 1668 * last packet and RxPktRdy set. Sometimes the packet is recd too soon 1669 * such that it gets lost when RxCSR is re-set at the end of the mode 1 1670 * transfer, while sometimes it is recd just a little late so that if you 1671 * try to configure for mode 0 soon after the mode 1 transfer is 1672 * completed, you will find rxcount 0. Okay, so you might think why not 1673 * wait for an interrupt when the pkt is recd. Well, you won't get any! 1674 */ 1675 static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma, 1676 struct musb_hw_ep *hw_ep, 1677 struct musb_qh *qh, 1678 struct urb *urb, 1679 size_t len, 1680 u8 iso_err) 1681 { 1682 struct musb *musb = hw_ep->musb; 1683 void __iomem *epio = hw_ep->regs; 1684 struct dma_channel *channel = hw_ep->rx_channel; 1685 u16 rx_count, val; 1686 int length, pipe, done; 1687 dma_addr_t buf; 1688 1689 rx_count = musb_readw(epio, MUSB_RXCOUNT); 1690 pipe = urb->pipe; 1691 1692 if (usb_pipeisoc(pipe)) { 1693 int d_status = 0; 1694 struct usb_iso_packet_descriptor *d; 1695 1696 d = urb->iso_frame_desc + qh->iso_idx; 1697 1698 if (iso_err) { 1699 d_status = -EILSEQ; 1700 urb->error_count++; 1701 } 1702 if (rx_count > d->length) { 1703 if (d_status == 0) { 1704 d_status = -EOVERFLOW; 1705 urb->error_count++; 1706 } 1707 musb_dbg(musb, "** OVERFLOW %d into %d", 1708 rx_count, d->length); 1709 1710 length = d->length; 1711 } else 1712 length = rx_count; 1713 d->status = d_status; 1714 buf = urb->transfer_dma + d->offset; 1715 } else { 1716 length = rx_count; 1717 buf = urb->transfer_dma + urb->actual_length; 1718 } 1719 1720 channel->desired_mode = 0; 1721 #ifdef USE_MODE1 1722 /* because of the issue below, mode 1 will 1723 * only rarely behave with correct semantics. 1724 */ 1725 if ((urb->transfer_flags & URB_SHORT_NOT_OK) 1726 && (urb->transfer_buffer_length - urb->actual_length) 1727 > qh->maxpacket) 1728 channel->desired_mode = 1; 1729 if (rx_count < hw_ep->max_packet_sz_rx) { 1730 length = rx_count; 1731 channel->desired_mode = 0; 1732 } else { 1733 length = urb->transfer_buffer_length; 1734 } 1735 #endif 1736 1737 /* See comments above on disadvantages of using mode 1 */ 1738 val = musb_readw(epio, MUSB_RXCSR); 1739 val &= ~MUSB_RXCSR_H_REQPKT; 1740 1741 if (channel->desired_mode == 0) 1742 val &= ~MUSB_RXCSR_H_AUTOREQ; 1743 else 1744 val |= MUSB_RXCSR_H_AUTOREQ; 1745 val |= MUSB_RXCSR_DMAENAB; 1746 1747 /* autoclear shouldn't be set in high bandwidth */ 1748 if (qh->hb_mult == 1) 1749 val |= MUSB_RXCSR_AUTOCLEAR; 1750 1751 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val); 1752 1753 /* REVISIT if when actual_length != 0, 1754 * transfer_buffer_length needs to be 1755 * adjusted first... 1756 */ 1757 done = dma->channel_program(channel, qh->maxpacket, 1758 channel->desired_mode, 1759 buf, length); 1760 1761 if (!done) { 1762 dma->channel_release(channel); 1763 hw_ep->rx_channel = NULL; 1764 channel = NULL; 1765 val = musb_readw(epio, MUSB_RXCSR); 1766 val &= ~(MUSB_RXCSR_DMAENAB 1767 | MUSB_RXCSR_H_AUTOREQ 1768 | MUSB_RXCSR_AUTOCLEAR); 1769 musb_writew(epio, MUSB_RXCSR, val); 1770 } 1771 1772 return done; 1773 } 1774 #else 1775 static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma, 1776 struct musb_hw_ep *hw_ep, 1777 struct musb_qh *qh, 1778 struct urb *urb, 1779 size_t len) 1780 { 1781 return false; 1782 } 1783 1784 static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma, 1785 struct musb_hw_ep *hw_ep, 1786 struct musb_qh *qh, 1787 struct urb *urb, 1788 size_t len, 1789 u8 iso_err) 1790 { 1791 return false; 1792 } 1793 #endif 1794 1795 /* 1796 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, 1797 * and high-bandwidth IN transfer cases. 1798 */ 1799 void musb_host_rx(struct musb *musb, u8 epnum) 1800 { 1801 struct urb *urb; 1802 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1803 struct dma_controller *c = musb->dma_controller; 1804 void __iomem *epio = hw_ep->regs; 1805 struct musb_qh *qh = hw_ep->in_qh; 1806 size_t xfer_len; 1807 void __iomem *mbase = musb->mregs; 1808 int pipe; 1809 u16 rx_csr, val; 1810 bool iso_err = false; 1811 bool done = false; 1812 u32 status; 1813 struct dma_channel *dma; 1814 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 1815 1816 musb_ep_select(mbase, epnum); 1817 1818 urb = next_urb(qh); 1819 dma = is_dma_capable() ? hw_ep->rx_channel : NULL; 1820 status = 0; 1821 xfer_len = 0; 1822 1823 rx_csr = musb_readw(epio, MUSB_RXCSR); 1824 val = rx_csr; 1825 1826 if (unlikely(!urb)) { 1827 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least 1828 * usbtest #11 (unlinks) triggers it regularly, sometimes 1829 * with fifo full. (Only with DMA??) 1830 */ 1831 musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d", 1832 epnum, val, musb_readw(epio, MUSB_RXCOUNT)); 1833 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); 1834 return; 1835 } 1836 1837 pipe = urb->pipe; 1838 1839 trace_musb_urb_rx(musb, urb); 1840 1841 /* check for errors, concurrent stall & unlink is not really 1842 * handled yet! */ 1843 if (rx_csr & MUSB_RXCSR_H_RXSTALL) { 1844 musb_dbg(musb, "RX end %d STALL", epnum); 1845 1846 /* stall; record URB status */ 1847 status = -EPIPE; 1848 1849 } else if (rx_csr & MUSB_RXCSR_H_ERROR) { 1850 musb_dbg(musb, "end %d RX proto error", epnum); 1851 1852 status = -EPROTO; 1853 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1854 1855 rx_csr &= ~MUSB_RXCSR_H_ERROR; 1856 musb_writew(epio, MUSB_RXCSR, rx_csr); 1857 1858 } else if (rx_csr & MUSB_RXCSR_DATAERROR) { 1859 1860 if (USB_ENDPOINT_XFER_ISOC != qh->type) { 1861 musb_dbg(musb, "RX end %d NAK timeout", epnum); 1862 1863 /* NOTE: NAKing is *NOT* an error, so we want to 1864 * continue. Except ... if there's a request for 1865 * another QH, use that instead of starving it. 1866 * 1867 * Devices like Ethernet and serial adapters keep 1868 * reads posted at all times, which will starve 1869 * other devices without this logic. 1870 */ 1871 if (usb_pipebulk(urb->pipe) 1872 && qh->mux == 1 1873 && !list_is_singular(&musb->in_bulk)) { 1874 musb_bulk_nak_timeout(musb, hw_ep, 1); 1875 return; 1876 } 1877 musb_ep_select(mbase, epnum); 1878 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1879 rx_csr &= ~MUSB_RXCSR_DATAERROR; 1880 musb_writew(epio, MUSB_RXCSR, rx_csr); 1881 1882 goto finish; 1883 } else { 1884 musb_dbg(musb, "RX end %d ISO data error", epnum); 1885 /* packet error reported later */ 1886 iso_err = true; 1887 } 1888 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { 1889 musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX", 1890 epnum); 1891 status = -EPROTO; 1892 } 1893 1894 /* faults abort the transfer */ 1895 if (status) { 1896 /* clean up dma and collect transfer count */ 1897 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1898 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1899 musb->dma_controller->channel_abort(dma); 1900 xfer_len = dma->actual_len; 1901 } 1902 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); 1903 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1904 done = true; 1905 goto finish; 1906 } 1907 1908 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { 1909 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ 1910 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); 1911 goto finish; 1912 } 1913 1914 /* thorough shutdown for now ... given more precise fault handling 1915 * and better queueing support, we might keep a DMA pipeline going 1916 * while processing this irq for earlier completions. 1917 */ 1918 1919 /* FIXME this is _way_ too much in-line logic for Mentor DMA */ 1920 if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) && 1921 (rx_csr & MUSB_RXCSR_H_REQPKT)) { 1922 /* REVISIT this happened for a while on some short reads... 1923 * the cleanup still needs investigation... looks bad... 1924 * and also duplicates dma cleanup code above ... plus, 1925 * shouldn't this be the "half full" double buffer case? 1926 */ 1927 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1928 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1929 musb->dma_controller->channel_abort(dma); 1930 xfer_len = dma->actual_len; 1931 done = true; 1932 } 1933 1934 musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr, 1935 xfer_len, dma ? ", dma" : ""); 1936 rx_csr &= ~MUSB_RXCSR_H_REQPKT; 1937 1938 musb_ep_select(mbase, epnum); 1939 musb_writew(epio, MUSB_RXCSR, 1940 MUSB_RXCSR_H_WZC_BITS | rx_csr); 1941 } 1942 1943 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { 1944 xfer_len = dma->actual_len; 1945 1946 val &= ~(MUSB_RXCSR_DMAENAB 1947 | MUSB_RXCSR_H_AUTOREQ 1948 | MUSB_RXCSR_AUTOCLEAR 1949 | MUSB_RXCSR_RXPKTRDY); 1950 musb_writew(hw_ep->regs, MUSB_RXCSR, val); 1951 1952 if (musb_dma_inventra(musb) || musb_dma_ux500(musb) || 1953 musb_dma_cppi41(musb)) { 1954 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len); 1955 musb_dbg(hw_ep->musb, 1956 "ep %d dma %s, rxcsr %04x, rxcount %d", 1957 epnum, done ? "off" : "reset", 1958 musb_readw(epio, MUSB_RXCSR), 1959 musb_readw(epio, MUSB_RXCOUNT)); 1960 } else { 1961 done = true; 1962 } 1963 1964 } else if (urb->status == -EINPROGRESS) { 1965 /* if no errors, be sure a packet is ready for unloading */ 1966 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { 1967 status = -EPROTO; 1968 ERR("Rx interrupt with no errors or packet!\n"); 1969 1970 /* FIXME this is another "SHOULD NEVER HAPPEN" */ 1971 1972 /* SCRUB (RX) */ 1973 /* do the proper sequence to abort the transfer */ 1974 musb_ep_select(mbase, epnum); 1975 val &= ~MUSB_RXCSR_H_REQPKT; 1976 musb_writew(epio, MUSB_RXCSR, val); 1977 goto finish; 1978 } 1979 1980 /* we are expecting IN packets */ 1981 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) || 1982 musb_dma_cppi41(musb)) && dma) { 1983 musb_dbg(hw_ep->musb, 1984 "RX%d count %d, buffer 0x%llx len %d/%d", 1985 epnum, musb_readw(epio, MUSB_RXCOUNT), 1986 (unsigned long long) urb->transfer_dma 1987 + urb->actual_length, 1988 qh->offset, 1989 urb->transfer_buffer_length); 1990 1991 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb, 1992 xfer_len, iso_err)) 1993 goto finish; 1994 else 1995 dev_err(musb->controller, "error: rx_dma failed\n"); 1996 } 1997 1998 if (!dma) { 1999 unsigned int received_len; 2000 2001 /* Unmap the buffer so that CPU can use it */ 2002 usb_hcd_unmap_urb_for_dma(musb->hcd, urb); 2003 2004 /* 2005 * We need to map sg if the transfer_buffer is 2006 * NULL. 2007 */ 2008 if (!urb->transfer_buffer) { 2009 qh->use_sg = true; 2010 sg_miter_start(&qh->sg_miter, urb->sg, 1, 2011 sg_flags); 2012 } 2013 2014 if (qh->use_sg) { 2015 if (!sg_miter_next(&qh->sg_miter)) { 2016 dev_err(musb->controller, "error: sg list empty\n"); 2017 sg_miter_stop(&qh->sg_miter); 2018 status = -EINVAL; 2019 done = true; 2020 goto finish; 2021 } 2022 urb->transfer_buffer = qh->sg_miter.addr; 2023 received_len = urb->actual_length; 2024 qh->offset = 0x0; 2025 done = musb_host_packet_rx(musb, urb, epnum, 2026 iso_err); 2027 /* Calculate the number of bytes received */ 2028 received_len = urb->actual_length - 2029 received_len; 2030 qh->sg_miter.consumed = received_len; 2031 sg_miter_stop(&qh->sg_miter); 2032 } else { 2033 done = musb_host_packet_rx(musb, urb, 2034 epnum, iso_err); 2035 } 2036 musb_dbg(musb, "read %spacket", done ? "last " : ""); 2037 } 2038 } 2039 2040 finish: 2041 urb->actual_length += xfer_len; 2042 qh->offset += xfer_len; 2043 if (done) { 2044 if (qh->use_sg) 2045 qh->use_sg = false; 2046 2047 if (urb->status == -EINPROGRESS) 2048 urb->status = status; 2049 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); 2050 } 2051 } 2052 2053 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. 2054 * the software schedule associates multiple such nodes with a given 2055 * host side hardware endpoint + direction; scheduling may activate 2056 * that hardware endpoint. 2057 */ 2058 static int musb_schedule( 2059 struct musb *musb, 2060 struct musb_qh *qh, 2061 int is_in) 2062 { 2063 int idle = 0; 2064 int best_diff; 2065 int best_end, epnum; 2066 struct musb_hw_ep *hw_ep = NULL; 2067 struct list_head *head = NULL; 2068 u8 toggle; 2069 u8 txtype; 2070 struct urb *urb = next_urb(qh); 2071 2072 /* use fixed hardware for control and bulk */ 2073 if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 2074 head = &musb->control; 2075 hw_ep = musb->control_ep; 2076 goto success; 2077 } 2078 2079 /* else, periodic transfers get muxed to other endpoints */ 2080 2081 /* 2082 * We know this qh hasn't been scheduled, so all we need to do 2083 * is choose which hardware endpoint to put it on ... 2084 * 2085 * REVISIT what we really want here is a regular schedule tree 2086 * like e.g. OHCI uses. 2087 */ 2088 best_diff = 4096; 2089 best_end = -1; 2090 2091 for (epnum = 1, hw_ep = musb->endpoints + 1; 2092 epnum < musb->nr_endpoints; 2093 epnum++, hw_ep++) { 2094 int diff; 2095 2096 if (musb_ep_get_qh(hw_ep, is_in) != NULL) 2097 continue; 2098 2099 if (hw_ep == musb->bulk_ep) 2100 continue; 2101 2102 if (is_in) 2103 diff = hw_ep->max_packet_sz_rx; 2104 else 2105 diff = hw_ep->max_packet_sz_tx; 2106 diff -= (qh->maxpacket * qh->hb_mult); 2107 2108 if (diff >= 0 && best_diff > diff) { 2109 2110 /* 2111 * Mentor controller has a bug in that if we schedule 2112 * a BULK Tx transfer on an endpoint that had earlier 2113 * handled ISOC then the BULK transfer has to start on 2114 * a zero toggle. If the BULK transfer starts on a 1 2115 * toggle then this transfer will fail as the mentor 2116 * controller starts the Bulk transfer on a 0 toggle 2117 * irrespective of the programming of the toggle bits 2118 * in the TXCSR register. Check for this condition 2119 * while allocating the EP for a Tx Bulk transfer. If 2120 * so skip this EP. 2121 */ 2122 hw_ep = musb->endpoints + epnum; 2123 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); 2124 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) 2125 >> 4) & 0x3; 2126 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && 2127 toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) 2128 continue; 2129 2130 best_diff = diff; 2131 best_end = epnum; 2132 } 2133 } 2134 /* use bulk reserved ep1 if no other ep is free */ 2135 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { 2136 hw_ep = musb->bulk_ep; 2137 if (is_in) 2138 head = &musb->in_bulk; 2139 else 2140 head = &musb->out_bulk; 2141 2142 /* Enable bulk RX/TX NAK timeout scheme when bulk requests are 2143 * multiplexed. This scheme does not work in high speed to full 2144 * speed scenario as NAK interrupts are not coming from a 2145 * full speed device connected to a high speed device. 2146 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and 2147 * 4 (8 frame or 8ms) for FS device. 2148 */ 2149 if (qh->dev) 2150 qh->intv_reg = 2151 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; 2152 goto success; 2153 } else if (best_end < 0) { 2154 return -ENOSPC; 2155 } 2156 2157 idle = 1; 2158 qh->mux = 0; 2159 hw_ep = musb->endpoints + best_end; 2160 musb_dbg(musb, "qh %p periodic slot %d", qh, best_end); 2161 success: 2162 if (head) { 2163 idle = list_empty(head); 2164 list_add_tail(&qh->ring, head); 2165 qh->mux = 1; 2166 } 2167 qh->hw_ep = hw_ep; 2168 qh->hep->hcpriv = qh; 2169 if (idle) 2170 musb_start_urb(musb, is_in, qh); 2171 return 0; 2172 } 2173 2174 static int musb_urb_enqueue( 2175 struct usb_hcd *hcd, 2176 struct urb *urb, 2177 gfp_t mem_flags) 2178 { 2179 unsigned long flags; 2180 struct musb *musb = hcd_to_musb(hcd); 2181 struct usb_host_endpoint *hep = urb->ep; 2182 struct musb_qh *qh; 2183 struct usb_endpoint_descriptor *epd = &hep->desc; 2184 int ret; 2185 unsigned type_reg; 2186 unsigned interval; 2187 2188 /* host role must be active */ 2189 if (!is_host_active(musb) || !musb->is_active) 2190 return -ENODEV; 2191 2192 trace_musb_urb_enq(musb, urb); 2193 2194 spin_lock_irqsave(&musb->lock, flags); 2195 ret = usb_hcd_link_urb_to_ep(hcd, urb); 2196 qh = ret ? NULL : hep->hcpriv; 2197 if (qh) 2198 urb->hcpriv = qh; 2199 spin_unlock_irqrestore(&musb->lock, flags); 2200 2201 /* DMA mapping was already done, if needed, and this urb is on 2202 * hep->urb_list now ... so we're done, unless hep wasn't yet 2203 * scheduled onto a live qh. 2204 * 2205 * REVISIT best to keep hep->hcpriv valid until the endpoint gets 2206 * disabled, testing for empty qh->ring and avoiding qh setup costs 2207 * except for the first urb queued after a config change. 2208 */ 2209 if (qh || ret) 2210 return ret; 2211 2212 /* Allocate and initialize qh, minimizing the work done each time 2213 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. 2214 * 2215 * REVISIT consider a dedicated qh kmem_cache, so it's harder 2216 * for bugs in other kernel code to break this driver... 2217 */ 2218 qh = kzalloc(sizeof *qh, mem_flags); 2219 if (!qh) { 2220 spin_lock_irqsave(&musb->lock, flags); 2221 usb_hcd_unlink_urb_from_ep(hcd, urb); 2222 spin_unlock_irqrestore(&musb->lock, flags); 2223 return -ENOMEM; 2224 } 2225 2226 qh->hep = hep; 2227 qh->dev = urb->dev; 2228 INIT_LIST_HEAD(&qh->ring); 2229 qh->is_ready = 1; 2230 2231 qh->maxpacket = usb_endpoint_maxp(epd); 2232 qh->type = usb_endpoint_type(epd); 2233 2234 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. 2235 * Some musb cores don't support high bandwidth ISO transfers; and 2236 * we don't (yet!) support high bandwidth interrupt transfers. 2237 */ 2238 qh->hb_mult = usb_endpoint_maxp_mult(epd); 2239 if (qh->hb_mult > 1) { 2240 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); 2241 2242 if (ok) 2243 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) 2244 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); 2245 if (!ok) { 2246 ret = -EMSGSIZE; 2247 goto done; 2248 } 2249 qh->maxpacket &= 0x7ff; 2250 } 2251 2252 qh->epnum = usb_endpoint_num(epd); 2253 2254 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ 2255 qh->addr_reg = (u8) usb_pipedevice(urb->pipe); 2256 2257 /* precompute rxtype/txtype/type0 register */ 2258 type_reg = (qh->type << 4) | qh->epnum; 2259 switch (urb->dev->speed) { 2260 case USB_SPEED_LOW: 2261 type_reg |= 0xc0; 2262 break; 2263 case USB_SPEED_FULL: 2264 type_reg |= 0x80; 2265 break; 2266 default: 2267 type_reg |= 0x40; 2268 } 2269 qh->type_reg = type_reg; 2270 2271 /* Precompute RXINTERVAL/TXINTERVAL register */ 2272 switch (qh->type) { 2273 case USB_ENDPOINT_XFER_INT: 2274 /* 2275 * Full/low speeds use the linear encoding, 2276 * high speed uses the logarithmic encoding. 2277 */ 2278 if (urb->dev->speed <= USB_SPEED_FULL) { 2279 interval = max_t(u8, epd->bInterval, 1); 2280 break; 2281 } 2282 /* FALLTHROUGH */ 2283 case USB_ENDPOINT_XFER_ISOC: 2284 /* ISO always uses logarithmic encoding */ 2285 interval = min_t(u8, epd->bInterval, 16); 2286 break; 2287 default: 2288 /* REVISIT we actually want to use NAK limits, hinting to the 2289 * transfer scheduling logic to try some other qh, e.g. try 2290 * for 2 msec first: 2291 * 2292 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; 2293 * 2294 * The downside of disabling this is that transfer scheduling 2295 * gets VERY unfair for nonperiodic transfers; a misbehaving 2296 * peripheral could make that hurt. That's perfectly normal 2297 * for reads from network or serial adapters ... so we have 2298 * partial NAKlimit support for bulk RX. 2299 * 2300 * The upside of disabling it is simpler transfer scheduling. 2301 */ 2302 interval = 0; 2303 } 2304 qh->intv_reg = interval; 2305 2306 /* precompute addressing for external hub/tt ports */ 2307 if (musb->is_multipoint) { 2308 struct usb_device *parent = urb->dev->parent; 2309 2310 if (parent != hcd->self.root_hub) { 2311 qh->h_addr_reg = (u8) parent->devnum; 2312 2313 /* set up tt info if needed */ 2314 if (urb->dev->tt) { 2315 qh->h_port_reg = (u8) urb->dev->ttport; 2316 if (urb->dev->tt->hub) 2317 qh->h_addr_reg = 2318 (u8) urb->dev->tt->hub->devnum; 2319 if (urb->dev->tt->multi) 2320 qh->h_addr_reg |= 0x80; 2321 } 2322 } 2323 } 2324 2325 /* invariant: hep->hcpriv is null OR the qh that's already scheduled. 2326 * until we get real dma queues (with an entry for each urb/buffer), 2327 * we only have work to do in the former case. 2328 */ 2329 spin_lock_irqsave(&musb->lock, flags); 2330 if (hep->hcpriv || !next_urb(qh)) { 2331 /* some concurrent activity submitted another urb to hep... 2332 * odd, rare, error prone, but legal. 2333 */ 2334 kfree(qh); 2335 qh = NULL; 2336 ret = 0; 2337 } else 2338 ret = musb_schedule(musb, qh, 2339 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); 2340 2341 if (ret == 0) { 2342 urb->hcpriv = qh; 2343 /* FIXME set urb->start_frame for iso/intr, it's tested in 2344 * musb_start_urb(), but otherwise only konicawc cares ... 2345 */ 2346 } 2347 spin_unlock_irqrestore(&musb->lock, flags); 2348 2349 done: 2350 if (ret != 0) { 2351 spin_lock_irqsave(&musb->lock, flags); 2352 usb_hcd_unlink_urb_from_ep(hcd, urb); 2353 spin_unlock_irqrestore(&musb->lock, flags); 2354 kfree(qh); 2355 } 2356 return ret; 2357 } 2358 2359 2360 /* 2361 * abort a transfer that's at the head of a hardware queue. 2362 * called with controller locked, irqs blocked 2363 * that hardware queue advances to the next transfer, unless prevented 2364 */ 2365 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) 2366 { 2367 struct musb_hw_ep *ep = qh->hw_ep; 2368 struct musb *musb = ep->musb; 2369 void __iomem *epio = ep->regs; 2370 unsigned hw_end = ep->epnum; 2371 void __iomem *regs = ep->musb->mregs; 2372 int is_in = usb_pipein(urb->pipe); 2373 int status = 0; 2374 u16 csr; 2375 struct dma_channel *dma = NULL; 2376 2377 musb_ep_select(regs, hw_end); 2378 2379 if (is_dma_capable()) { 2380 dma = is_in ? ep->rx_channel : ep->tx_channel; 2381 if (dma) { 2382 status = ep->musb->dma_controller->channel_abort(dma); 2383 musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d", 2384 is_in ? 'R' : 'T', ep->epnum, 2385 urb, status); 2386 urb->actual_length += dma->actual_len; 2387 } 2388 } 2389 2390 /* turn off DMA requests, discard state, stop polling ... */ 2391 if (ep->epnum && is_in) { 2392 /* giveback saves bulk toggle */ 2393 csr = musb_h_flush_rxfifo(ep, 0); 2394 2395 /* clear the endpoint's irq status here to avoid bogus irqs */ 2396 if (is_dma_capable() && dma) 2397 musb_platform_clear_ep_rxintr(musb, ep->epnum); 2398 } else if (ep->epnum) { 2399 musb_h_tx_flush_fifo(ep); 2400 csr = musb_readw(epio, MUSB_TXCSR); 2401 csr &= ~(MUSB_TXCSR_AUTOSET 2402 | MUSB_TXCSR_DMAENAB 2403 | MUSB_TXCSR_H_RXSTALL 2404 | MUSB_TXCSR_H_NAKTIMEOUT 2405 | MUSB_TXCSR_H_ERROR 2406 | MUSB_TXCSR_TXPKTRDY); 2407 musb_writew(epio, MUSB_TXCSR, csr); 2408 /* REVISIT may need to clear FLUSHFIFO ... */ 2409 musb_writew(epio, MUSB_TXCSR, csr); 2410 /* flush cpu writebuffer */ 2411 csr = musb_readw(epio, MUSB_TXCSR); 2412 } else { 2413 musb_h_ep0_flush_fifo(ep); 2414 } 2415 if (status == 0) 2416 musb_advance_schedule(ep->musb, urb, ep, is_in); 2417 return status; 2418 } 2419 2420 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) 2421 { 2422 struct musb *musb = hcd_to_musb(hcd); 2423 struct musb_qh *qh; 2424 unsigned long flags; 2425 int is_in = usb_pipein(urb->pipe); 2426 int ret; 2427 2428 trace_musb_urb_deq(musb, urb); 2429 2430 spin_lock_irqsave(&musb->lock, flags); 2431 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 2432 if (ret) 2433 goto done; 2434 2435 qh = urb->hcpriv; 2436 if (!qh) 2437 goto done; 2438 2439 /* 2440 * Any URB not actively programmed into endpoint hardware can be 2441 * immediately given back; that's any URB not at the head of an 2442 * endpoint queue, unless someday we get real DMA queues. And even 2443 * if it's at the head, it might not be known to the hardware... 2444 * 2445 * Otherwise abort current transfer, pending DMA, etc.; urb->status 2446 * has already been updated. This is a synchronous abort; it'd be 2447 * OK to hold off until after some IRQ, though. 2448 * 2449 * NOTE: qh is invalid unless !list_empty(&hep->urb_list) 2450 */ 2451 if (!qh->is_ready 2452 || urb->urb_list.prev != &qh->hep->urb_list 2453 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { 2454 int ready = qh->is_ready; 2455 2456 qh->is_ready = 0; 2457 musb_giveback(musb, urb, 0); 2458 qh->is_ready = ready; 2459 2460 /* If nothing else (usually musb_giveback) is using it 2461 * and its URB list has emptied, recycle this qh. 2462 */ 2463 if (ready && list_empty(&qh->hep->urb_list)) { 2464 qh->hep->hcpriv = NULL; 2465 list_del(&qh->ring); 2466 kfree(qh); 2467 } 2468 } else 2469 ret = musb_cleanup_urb(urb, qh); 2470 done: 2471 spin_unlock_irqrestore(&musb->lock, flags); 2472 return ret; 2473 } 2474 2475 /* disable an endpoint */ 2476 static void 2477 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) 2478 { 2479 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; 2480 unsigned long flags; 2481 struct musb *musb = hcd_to_musb(hcd); 2482 struct musb_qh *qh; 2483 struct urb *urb; 2484 2485 spin_lock_irqsave(&musb->lock, flags); 2486 2487 qh = hep->hcpriv; 2488 if (qh == NULL) 2489 goto exit; 2490 2491 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ 2492 2493 /* Kick the first URB off the hardware, if needed */ 2494 qh->is_ready = 0; 2495 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { 2496 urb = next_urb(qh); 2497 2498 /* make software (then hardware) stop ASAP */ 2499 if (!urb->unlinked) 2500 urb->status = -ESHUTDOWN; 2501 2502 /* cleanup */ 2503 musb_cleanup_urb(urb, qh); 2504 2505 /* Then nuke all the others ... and advance the 2506 * queue on hw_ep (e.g. bulk ring) when we're done. 2507 */ 2508 while (!list_empty(&hep->urb_list)) { 2509 urb = next_urb(qh); 2510 urb->status = -ESHUTDOWN; 2511 musb_advance_schedule(musb, urb, qh->hw_ep, is_in); 2512 } 2513 } else { 2514 /* Just empty the queue; the hardware is busy with 2515 * other transfers, and since !qh->is_ready nothing 2516 * will activate any of these as it advances. 2517 */ 2518 while (!list_empty(&hep->urb_list)) 2519 musb_giveback(musb, next_urb(qh), -ESHUTDOWN); 2520 2521 hep->hcpriv = NULL; 2522 list_del(&qh->ring); 2523 kfree(qh); 2524 } 2525 exit: 2526 spin_unlock_irqrestore(&musb->lock, flags); 2527 } 2528 2529 static int musb_h_get_frame_number(struct usb_hcd *hcd) 2530 { 2531 struct musb *musb = hcd_to_musb(hcd); 2532 2533 return musb_readw(musb->mregs, MUSB_FRAME); 2534 } 2535 2536 static int musb_h_start(struct usb_hcd *hcd) 2537 { 2538 struct musb *musb = hcd_to_musb(hcd); 2539 2540 /* NOTE: musb_start() is called when the hub driver turns 2541 * on port power, or when (OTG) peripheral starts. 2542 */ 2543 hcd->state = HC_STATE_RUNNING; 2544 musb->port1_status = 0; 2545 return 0; 2546 } 2547 2548 static void musb_h_stop(struct usb_hcd *hcd) 2549 { 2550 musb_stop(hcd_to_musb(hcd)); 2551 hcd->state = HC_STATE_HALT; 2552 } 2553 2554 static int musb_bus_suspend(struct usb_hcd *hcd) 2555 { 2556 struct musb *musb = hcd_to_musb(hcd); 2557 u8 devctl; 2558 2559 musb_port_suspend(musb, true); 2560 2561 if (!is_host_active(musb)) 2562 return 0; 2563 2564 switch (musb->xceiv->otg->state) { 2565 case OTG_STATE_A_SUSPEND: 2566 return 0; 2567 case OTG_STATE_A_WAIT_VRISE: 2568 /* ID could be grounded even if there's no device 2569 * on the other end of the cable. NOTE that the 2570 * A_WAIT_VRISE timers are messy with MUSB... 2571 */ 2572 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 2573 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 2574 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; 2575 break; 2576 default: 2577 break; 2578 } 2579 2580 if (musb->is_active) { 2581 WARNING("trying to suspend as %s while active\n", 2582 usb_otg_state_string(musb->xceiv->otg->state)); 2583 return -EBUSY; 2584 } else 2585 return 0; 2586 } 2587 2588 static int musb_bus_resume(struct usb_hcd *hcd) 2589 { 2590 struct musb *musb = hcd_to_musb(hcd); 2591 2592 if (musb->config && 2593 musb->config->host_port_deassert_reset_at_resume) 2594 musb_port_reset(musb, false); 2595 2596 return 0; 2597 } 2598 2599 #ifndef CONFIG_MUSB_PIO_ONLY 2600 2601 #define MUSB_USB_DMA_ALIGN 4 2602 2603 struct musb_temp_buffer { 2604 void *kmalloc_ptr; 2605 void *old_xfer_buffer; 2606 u8 data[0]; 2607 }; 2608 2609 static void musb_free_temp_buffer(struct urb *urb) 2610 { 2611 enum dma_data_direction dir; 2612 struct musb_temp_buffer *temp; 2613 size_t length; 2614 2615 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2616 return; 2617 2618 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2619 2620 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, 2621 data); 2622 2623 if (dir == DMA_FROM_DEVICE) { 2624 if (usb_pipeisoc(urb->pipe)) 2625 length = urb->transfer_buffer_length; 2626 else 2627 length = urb->actual_length; 2628 2629 memcpy(temp->old_xfer_buffer, temp->data, length); 2630 } 2631 urb->transfer_buffer = temp->old_xfer_buffer; 2632 kfree(temp->kmalloc_ptr); 2633 2634 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2635 } 2636 2637 static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) 2638 { 2639 enum dma_data_direction dir; 2640 struct musb_temp_buffer *temp; 2641 void *kmalloc_ptr; 2642 size_t kmalloc_size; 2643 2644 if (urb->num_sgs || urb->sg || 2645 urb->transfer_buffer_length == 0 || 2646 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) 2647 return 0; 2648 2649 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2650 2651 /* Allocate a buffer with enough padding for alignment */ 2652 kmalloc_size = urb->transfer_buffer_length + 2653 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; 2654 2655 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2656 if (!kmalloc_ptr) 2657 return -ENOMEM; 2658 2659 /* Position our struct temp_buffer such that data is aligned */ 2660 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); 2661 2662 2663 temp->kmalloc_ptr = kmalloc_ptr; 2664 temp->old_xfer_buffer = urb->transfer_buffer; 2665 if (dir == DMA_TO_DEVICE) 2666 memcpy(temp->data, urb->transfer_buffer, 2667 urb->transfer_buffer_length); 2668 urb->transfer_buffer = temp->data; 2669 2670 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2671 2672 return 0; 2673 } 2674 2675 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, 2676 gfp_t mem_flags) 2677 { 2678 struct musb *musb = hcd_to_musb(hcd); 2679 int ret; 2680 2681 /* 2682 * The DMA engine in RTL1.8 and above cannot handle 2683 * DMA addresses that are not aligned to a 4 byte boundary. 2684 * For such engine implemented (un)map_urb_for_dma hooks. 2685 * Do not use these hooks for RTL<1.8 2686 */ 2687 if (musb->hwvers < MUSB_HWVERS_1800) 2688 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 2689 2690 ret = musb_alloc_temp_buffer(urb, mem_flags); 2691 if (ret) 2692 return ret; 2693 2694 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 2695 if (ret) 2696 musb_free_temp_buffer(urb); 2697 2698 return ret; 2699 } 2700 2701 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) 2702 { 2703 struct musb *musb = hcd_to_musb(hcd); 2704 2705 usb_hcd_unmap_urb_for_dma(hcd, urb); 2706 2707 /* Do not use this hook for RTL<1.8 (see description above) */ 2708 if (musb->hwvers < MUSB_HWVERS_1800) 2709 return; 2710 2711 musb_free_temp_buffer(urb); 2712 } 2713 #endif /* !CONFIG_MUSB_PIO_ONLY */ 2714 2715 static const struct hc_driver musb_hc_driver = { 2716 .description = "musb-hcd", 2717 .product_desc = "MUSB HDRC host driver", 2718 .hcd_priv_size = sizeof(struct musb *), 2719 .flags = HCD_USB2 | HCD_MEMORY, 2720 2721 /* not using irq handler or reset hooks from usbcore, since 2722 * those must be shared with peripheral code for OTG configs 2723 */ 2724 2725 .start = musb_h_start, 2726 .stop = musb_h_stop, 2727 2728 .get_frame_number = musb_h_get_frame_number, 2729 2730 .urb_enqueue = musb_urb_enqueue, 2731 .urb_dequeue = musb_urb_dequeue, 2732 .endpoint_disable = musb_h_disable, 2733 2734 #ifndef CONFIG_MUSB_PIO_ONLY 2735 .map_urb_for_dma = musb_map_urb_for_dma, 2736 .unmap_urb_for_dma = musb_unmap_urb_for_dma, 2737 #endif 2738 2739 .hub_status_data = musb_hub_status_data, 2740 .hub_control = musb_hub_control, 2741 .bus_suspend = musb_bus_suspend, 2742 .bus_resume = musb_bus_resume, 2743 /* .start_port_reset = NULL, */ 2744 /* .hub_irq_enable = NULL, */ 2745 }; 2746 2747 int musb_host_alloc(struct musb *musb) 2748 { 2749 struct device *dev = musb->controller; 2750 2751 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ 2752 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); 2753 if (!musb->hcd) 2754 return -EINVAL; 2755 2756 *musb->hcd->hcd_priv = (unsigned long) musb; 2757 musb->hcd->self.uses_pio_for_control = 1; 2758 musb->hcd->uses_new_polling = 1; 2759 musb->hcd->has_tt = 1; 2760 2761 return 0; 2762 } 2763 2764 void musb_host_cleanup(struct musb *musb) 2765 { 2766 if (musb->port_mode == MUSB_PORT_MODE_GADGET) 2767 return; 2768 usb_remove_hcd(musb->hcd); 2769 } 2770 2771 void musb_host_free(struct musb *musb) 2772 { 2773 usb_put_hcd(musb->hcd); 2774 } 2775 2776 int musb_host_setup(struct musb *musb, int power_budget) 2777 { 2778 int ret; 2779 struct usb_hcd *hcd = musb->hcd; 2780 2781 if (musb->port_mode == MUSB_PORT_MODE_HOST) { 2782 MUSB_HST_MODE(musb); 2783 musb->xceiv->otg->default_a = 1; 2784 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2785 } 2786 otg_set_host(musb->xceiv->otg, &hcd->self); 2787 hcd->self.otg_port = 1; 2788 musb->xceiv->otg->host = &hcd->self; 2789 hcd->power_budget = 2 * (power_budget ? : 250); 2790 2791 ret = usb_add_hcd(hcd, 0, 0); 2792 if (ret < 0) 2793 return ret; 2794 2795 device_wakeup_enable(hcd->self.controller); 2796 return 0; 2797 } 2798 2799 void musb_host_resume_root_hub(struct musb *musb) 2800 { 2801 usb_hcd_resume_root_hub(musb->hcd); 2802 } 2803 2804 void musb_host_poke_root_hub(struct musb *musb) 2805 { 2806 MUSB_HST_MODE(musb); 2807 if (musb->hcd->status_urb) 2808 usb_hcd_poll_rh_status(musb->hcd); 2809 else 2810 usb_hcd_resume_root_hub(musb->hcd); 2811 } 2812