/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */

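/*
 * usb_hcd glue: hcd->hcd_priv stores a pointer to the struct musb (not
 * the structure itself), hence the double indirection below.
 */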
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the tx fifo flush fails; it has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, ensure tx urb(s) are queued when
		 * unplugging the usb device connected to the AM335x usb
		 * host port.
		 *
		 * I found that using a usb-ethernet device and running iperf
		 * (client on AM335x) has a very high chance of triggering it.
		 *
		 * Better to turn on dev_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the tx channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
	}
}

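/*
 * Flush ep0's fifo.  Note that ep0 reuses the TX register offsets, so
 * MUSB_TXCSR below actually addresses CSR0 and carries CSR0 bit values.
 */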
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

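/*
 * Bind/unbind a qh to a hardware endpoint.  A shared-FIFO endpoint keeps
 * both direction pointers in sync, so a lookup from either direction
 * finds the current owner.
 */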
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			} s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

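/*
 * The controller keeps one data toggle per hardware endpoint, so when a
 * qh is taken off the hardware its toggle state has to be parked in
 * usbcore via usb_settoggle(); the next qh scheduled on this endpoint
 * then resumes with the correct DATA0/DATA1 sequence.
 */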
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* else: fall through */

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	/*
	 * The pipe must be broken if current urb->status is set, so don't
	 * start next urb.
	 * TODO: to minimize the risk of regression, only check urb->status
	 * for RX, until we have a test case to understand the behavior of TX.
	 */
	if ((!status || !is_in) && qh && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

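/*
 * The PIO unload path below reads MUSB_RXCOUNT, copies that many bytes
 * out of the FIFO, and then either requests the next packet (REQPKT)
 * or, on overflow or completion, flushes and stops.
 */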
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
			    qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

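/*
 * Mentor TX DMA mode selection: in mode 1 the controller moves a whole
 * multi-packet request and (with AUTOSET) raises TXPKTRDY itself, while
 * mode 0 moves a single packet and leaves TXPKTRDY to software.
 */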
static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
					struct musb_hw_ep *hw_ep, struct musb_qh *qh,
					struct urb *urb, u32 offset,
					u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	1	Yes(HS bulk)
		 *	1	>1	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}

static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}

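/*
 * Program the TX DMA channel for one segment of an URB.  Returns false
 * when no DMA flavor claims the transfer or channel_program() fails; in
 * the failure case the channel is released so the caller can fall back
 * to PIO.
 */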
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

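/*
 * musb_ep_program() covers all three flavors of endpoint setup: ep0
 * (which reuses the TX register file), TX with optional DMA, and RX with
 * optional DMA; whenever DMA setup fails or is unavailable, the FIFO is
 * loaded by PIO instead.
 */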
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
					/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
						"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

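/*
 * This is the bulk anti-starvation scheduling mentioned in the status
 * notes at the top of this file: on a NAK timeout the current qh is
 * rotated to the tail of in_bulk/out_bulk and the next queued qh takes
 * over the hardware endpoint.
 */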
/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
				  int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

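/*
 * ep0 stage machine: MUSB_EP0_START (SETUP) advances to MUSB_EP0_IN or
 * MUSB_EP0_OUT for the data stage, then to MUSB_EP0_STATUS, and back to
 * MUSB_EP0_IDLE; musb_h_ep0_continue() drives the data stage transitions.
 */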
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
 */

#endif

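/*
 * The TX irq handler below first classifies errors (stall, protocol
 * error, NAK timeout), then lets Mentor DMA mode 1 transfers drain via a
 * terminal TXPKTRDY interrupt, and finally either completes the URB or
 * starts the next packet, by DMA when possible and by PIO otherwise.
 */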
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
				"CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

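/*
 * The RX DMA helpers below come in pairs: a real implementation compiled
 * in when the matching DMA controller is configured, and a stub returning
 * false so callers fall back to PIO otherwise.
 */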
#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length, res;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
		(u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	res = dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);

	return res;
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
#endif

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *	(a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *	(b) termination conditions are: short RX, or buffer full;
 *	(c) fault modes include
 *	    - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *	      (and that endpoint's dma queue stops immediately)
 *	    - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
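/*
 * musb_rx_dma_in_inventra_cppi41() programs the RX channel for the packet
 * that just arrived; building with USE_MODE1 defined would opt in to the
 * mode 1 behavior described above, with the problems it lists.
 */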
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				    channel->desired_mode,
				    buf, length);

	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
#endif

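/*
 * musb_host_rx() mirrors musb_host_tx(): errors are classified first
 * (stall, protocol error, NAK timeout, incomplete iso), then an active
 * DMA channel is serviced via the helpers above, and otherwise the FIFO
 * is unloaded by PIO through musb_host_packet_rx().
 */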
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	struct dma_controller	*c = musb->dma_controller;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

		rx_csr &= ~MUSB_RXCSR_H_ERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
			epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
			xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
			    MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}

	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) {
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
			dev_dbg(hw_ep->musb->controller,
				"ep %d dma %s, rxcsr %04x, rxcount %d\n",
				epnum, done ? "off" : "reset",
				musb_readw(epio, MUSB_RXCSR),
				musb_readw(epio, MUSB_RXCOUNT));
		} else {
			done = true;
		}

	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) && dma) {
			dev_dbg(hw_ep->musb->controller,
				"RX%d count %d, buffer 0x%llx len %d/%d\n",
				epnum, musb_readw(epio, MUSB_RXCOUNT),
				(unsigned long long) urb->transfer_dma
				+ urb->actual_length,
				qh->offset,
				urb->transfer_buffer_length);

			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len, iso_err))
				goto finish;
			else
				dev_err(musb->controller, "error: rx_dma failed\n");
		}

1891 */ 1892 if (usb_pipebulk(urb->pipe) 1893 && qh->mux == 1 1894 && !list_is_singular(&musb->in_bulk)) { 1895 musb_bulk_nak_timeout(musb, hw_ep, 1); 1896 return; 1897 } 1898 musb_ep_select(mbase, epnum); 1899 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1900 rx_csr &= ~MUSB_RXCSR_DATAERROR; 1901 musb_writew(epio, MUSB_RXCSR, rx_csr); 1902 1903 goto finish; 1904 } else { 1905 dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum); 1906 /* packet error reported later */ 1907 iso_err = true; 1908 } 1909 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { 1910 dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n", 1911 epnum); 1912 status = -EPROTO; 1913 } 1914 1915 /* faults abort the transfer */ 1916 if (status) { 1917 /* clean up dma and collect transfer count */ 1918 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1919 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1920 musb->dma_controller->channel_abort(dma); 1921 xfer_len = dma->actual_len; 1922 } 1923 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); 1924 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1925 done = true; 1926 goto finish; 1927 } 1928 1929 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { 1930 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ 1931 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); 1932 goto finish; 1933 } 1934 1935 /* thorough shutdown for now ... given more precise fault handling 1936 * and better queueing support, we might keep a DMA pipeline going 1937 * while processing this irq for earlier completions. 1938 */ 1939 1940 /* FIXME this is _way_ too much in-line logic for Mentor DMA */ 1941 if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) && 1942 (rx_csr & MUSB_RXCSR_H_REQPKT)) { 1943 /* REVISIT this happened for a while on some short reads... 1944 * the cleanup still needs investigation... looks bad... 1945 * and also duplicates dma cleanup code above ... plus, 1946 * shouldn't this be the "half full" double buffer case? 1947 */ 1948 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 1949 dma->status = MUSB_DMA_STATUS_CORE_ABORT; 1950 musb->dma_controller->channel_abort(dma); 1951 xfer_len = dma->actual_len; 1952 done = true; 1953 } 1954 1955 dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, 1956 xfer_len, dma ? ", dma" : ""); 1957 rx_csr &= ~MUSB_RXCSR_H_REQPKT; 1958 1959 musb_ep_select(mbase, epnum); 1960 musb_writew(epio, MUSB_RXCSR, 1961 MUSB_RXCSR_H_WZC_BITS | rx_csr); 1962 } 1963 1964 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { 1965 xfer_len = dma->actual_len; 1966 1967 val &= ~(MUSB_RXCSR_DMAENAB 1968 | MUSB_RXCSR_H_AUTOREQ 1969 | MUSB_RXCSR_AUTOCLEAR 1970 | MUSB_RXCSR_RXPKTRDY); 1971 musb_writew(hw_ep->regs, MUSB_RXCSR, val); 1972 1973 if (musb_dma_inventra(musb) || musb_dma_ux500(musb) || 1974 musb_dma_cppi41(musb)) { 1975 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len); 1976 dev_dbg(hw_ep->musb->controller, 1977 "ep %d dma %s, rxcsr %04x, rxcount %d\n", 1978 epnum, done ? 
"off" : "reset", 1979 musb_readw(epio, MUSB_RXCSR), 1980 musb_readw(epio, MUSB_RXCOUNT)); 1981 } else { 1982 done = true; 1983 } 1984 1985 } else if (urb->status == -EINPROGRESS) { 1986 /* if no errors, be sure a packet is ready for unloading */ 1987 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { 1988 status = -EPROTO; 1989 ERR("Rx interrupt with no errors or packet!\n"); 1990 1991 /* FIXME this is another "SHOULD NEVER HAPPEN" */ 1992 1993 /* SCRUB (RX) */ 1994 /* do the proper sequence to abort the transfer */ 1995 musb_ep_select(mbase, epnum); 1996 val &= ~MUSB_RXCSR_H_REQPKT; 1997 musb_writew(epio, MUSB_RXCSR, val); 1998 goto finish; 1999 } 2000 2001 /* we are expecting IN packets */ 2002 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) || 2003 musb_dma_cppi41(musb)) && dma) { 2004 dev_dbg(hw_ep->musb->controller, 2005 "RX%d count %d, buffer 0x%llx len %d/%d\n", 2006 epnum, musb_readw(epio, MUSB_RXCOUNT), 2007 (unsigned long long) urb->transfer_dma 2008 + urb->actual_length, 2009 qh->offset, 2010 urb->transfer_buffer_length); 2011 2012 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb, 2013 xfer_len, iso_err)) 2014 goto finish; 2015 else 2016 dev_err(musb->controller, "error: rx_dma failed\n"); 2017 } 2018 2019 if (!dma) { 2020 unsigned int received_len; 2021 2022 /* Unmap the buffer so that CPU can use it */ 2023 usb_hcd_unmap_urb_for_dma(musb->hcd, urb); 2024 2025 /* 2026 * We need to map sg if the transfer_buffer is 2027 * NULL. 2028 */ 2029 if (!urb->transfer_buffer) { 2030 qh->use_sg = true; 2031 sg_miter_start(&qh->sg_miter, urb->sg, 1, 2032 sg_flags); 2033 } 2034 2035 if (qh->use_sg) { 2036 if (!sg_miter_next(&qh->sg_miter)) { 2037 dev_err(musb->controller, "error: sg list empty\n"); 2038 sg_miter_stop(&qh->sg_miter); 2039 status = -EINVAL; 2040 done = true; 2041 goto finish; 2042 } 2043 urb->transfer_buffer = qh->sg_miter.addr; 2044 received_len = urb->actual_length; 2045 qh->offset = 0x0; 2046 done = musb_host_packet_rx(musb, urb, epnum, 2047 iso_err); 2048 /* Calculate the number of bytes received */ 2049 received_len = urb->actual_length - 2050 received_len; 2051 qh->sg_miter.consumed = received_len; 2052 sg_miter_stop(&qh->sg_miter); 2053 } else { 2054 done = musb_host_packet_rx(musb, urb, 2055 epnum, iso_err); 2056 } 2057 dev_dbg(musb->controller, "read %spacket\n", done ? "last " : ""); 2058 } 2059 } 2060 2061 finish: 2062 urb->actual_length += xfer_len; 2063 qh->offset += xfer_len; 2064 if (done) { 2065 if (qh->use_sg) 2066 qh->use_sg = false; 2067 2068 if (urb->status == -EINPROGRESS) 2069 urb->status = status; 2070 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); 2071 } 2072 } 2073 2074 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. 2075 * the software schedule associates multiple such nodes with a given 2076 * host side hardware endpoint + direction; scheduling may activate 2077 * that hardware endpoint. 
2078 */ 2079 static int musb_schedule( 2080 struct musb *musb, 2081 struct musb_qh *qh, 2082 int is_in) 2083 { 2084 int idle = 0; 2085 int best_diff; 2086 int best_end, epnum; 2087 struct musb_hw_ep *hw_ep = NULL; 2088 struct list_head *head = NULL; 2089 u8 toggle; 2090 u8 txtype; 2091 struct urb *urb = next_urb(qh); 2092 2093 /* use fixed hardware for control and bulk */ 2094 if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 2095 head = &musb->control; 2096 hw_ep = musb->control_ep; 2097 goto success; 2098 } 2099 2100 /* else, periodic transfers get muxed to other endpoints */ 2101 2102 /* 2103 * We know this qh hasn't been scheduled, so all we need to do 2104 * is choose which hardware endpoint to put it on ... 2105 * 2106 * REVISIT what we really want here is a regular schedule tree 2107 * like e.g. OHCI uses. 2108 */ 2109 best_diff = 4096; 2110 best_end = -1; 2111 2112 for (epnum = 1, hw_ep = musb->endpoints + 1; 2113 epnum < musb->nr_endpoints; 2114 epnum++, hw_ep++) { 2115 int diff; 2116 2117 if (musb_ep_get_qh(hw_ep, is_in) != NULL) 2118 continue; 2119 2120 if (hw_ep == musb->bulk_ep) 2121 continue; 2122 2123 if (is_in) 2124 diff = hw_ep->max_packet_sz_rx; 2125 else 2126 diff = hw_ep->max_packet_sz_tx; 2127 diff -= (qh->maxpacket * qh->hb_mult); 2128 2129 if (diff >= 0 && best_diff > diff) { 2130 2131 /* 2132 * The Mentor controller has a bug: if we schedule 2133 * a BULK TX transfer on an endpoint that had earlier 2134 * handled ISOC, then the BULK transfer has to start on 2135 * a zero toggle. If the BULK transfer starts on a 1 2136 * toggle then this transfer will fail, as the Mentor 2137 * controller starts the BULK transfer on a 0 toggle 2138 * irrespective of the programming of the toggle bits 2139 * in the TXCSR register. Check for this condition 2140 * while allocating the EP for a TX BULK transfer. If 2141 * so, skip this EP. 2142 */ 2143 hw_ep = musb->endpoints + epnum; 2144 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); 2145 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) 2146 >> 4) & 0x3; 2147 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && 2148 toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) 2149 continue; 2150 2151 best_diff = diff; 2152 best_end = epnum; 2153 } 2154 } 2155 /* use bulk reserved ep1 if no other ep is free */ 2156 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { 2157 hw_ep = musb->bulk_ep; 2158 if (is_in) 2159 head = &musb->in_bulk; 2160 else 2161 head = &musb->out_bulk; 2162 2163 /* Enable the bulk RX/TX NAK timeout scheme when bulk requests are 2164 * multiplexed. This scheme does not work in the high speed to full 2165 * speed scenario, as NAK interrupts do not arrive from a 2166 * full speed device connected behind a high speed hub. 2167 * The NAK timeout interval is 8 (128 uframes, or 16 ms) for HS and 2168 * 4 (8 frames, or 8 ms) for FS devices. 2169 */ 2170 if (qh->dev) 2171 qh->intv_reg = 2172 (USB_SPEED_HIGH == qh->dev->speed) ?
8 : 4; 2173 goto success; 2174 } else if (best_end < 0) { 2175 return -ENOSPC; 2176 } 2177 2178 idle = 1; 2179 qh->mux = 0; 2180 hw_ep = musb->endpoints + best_end; 2181 dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); 2182 success: 2183 if (head) { 2184 idle = list_empty(head); 2185 list_add_tail(&qh->ring, head); 2186 qh->mux = 1; 2187 } 2188 qh->hw_ep = hw_ep; 2189 qh->hep->hcpriv = qh; 2190 if (idle) 2191 musb_start_urb(musb, is_in, qh); 2192 return 0; 2193 } 2194 2195 static int musb_urb_enqueue( 2196 struct usb_hcd *hcd, 2197 struct urb *urb, 2198 gfp_t mem_flags) 2199 { 2200 unsigned long flags; 2201 struct musb *musb = hcd_to_musb(hcd); 2202 struct usb_host_endpoint *hep = urb->ep; 2203 struct musb_qh *qh; 2204 struct usb_endpoint_descriptor *epd = &hep->desc; 2205 int ret; 2206 unsigned type_reg; 2207 unsigned interval; 2208 2209 /* host role must be active */ 2210 if (!is_host_active(musb) || !musb->is_active) 2211 return -ENODEV; 2212 2213 spin_lock_irqsave(&musb->lock, flags); 2214 ret = usb_hcd_link_urb_to_ep(hcd, urb); 2215 qh = ret ? NULL : hep->hcpriv; 2216 if (qh) 2217 urb->hcpriv = qh; 2218 spin_unlock_irqrestore(&musb->lock, flags); 2219 2220 /* DMA mapping was already done, if needed, and this urb is on 2221 * hep->urb_list now ... so we're done, unless hep wasn't yet 2222 * scheduled onto a live qh. 2223 * 2224 * REVISIT best to keep hep->hcpriv valid until the endpoint gets 2225 * disabled, testing for empty qh->ring and avoiding qh setup costs 2226 * except for the first urb queued after a config change. 2227 */ 2228 if (qh || ret) 2229 return ret; 2230 2231 /* Allocate and initialize qh, minimizing the work done each time 2232 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. 2233 * 2234 * REVISIT consider a dedicated qh kmem_cache, so it's harder 2235 * for bugs in other kernel code to break this driver... 2236 */ 2237 qh = kzalloc(sizeof *qh, mem_flags); 2238 if (!qh) { 2239 spin_lock_irqsave(&musb->lock, flags); 2240 usb_hcd_unlink_urb_from_ep(hcd, urb); 2241 spin_unlock_irqrestore(&musb->lock, flags); 2242 return -ENOMEM; 2243 } 2244 2245 qh->hep = hep; 2246 qh->dev = urb->dev; 2247 INIT_LIST_HEAD(&qh->ring); 2248 qh->is_ready = 1; 2249 2250 qh->maxpacket = usb_endpoint_maxp(epd); 2251 qh->type = usb_endpoint_type(epd); 2252 2253 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. 2254 * Some musb cores don't support high bandwidth ISO transfers; and 2255 * we don't (yet!) support high bandwidth interrupt transfers. 
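 *
 * Worked example (values straight from the USB 2.0 wMaxPacketSize
 * layout, not from a real device): wMaxPacketSize = 0x1400 has
 * bits 12:11 = 2, so hb_mult below becomes 3 transactions per
 * microframe, and bits 10:0 give maxpacket = 0x400 = 1024 bytes --
 * up to 3072 bytes per microframe on a high-bandwidth ISO endpoint.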
2256 */ 2257 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); 2258 if (qh->hb_mult > 1) { 2259 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); 2260 2261 if (ok) 2262 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) 2263 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); 2264 if (!ok) { 2265 ret = -EMSGSIZE; 2266 goto done; 2267 } 2268 qh->maxpacket &= 0x7ff; 2269 } 2270 2271 qh->epnum = usb_endpoint_num(epd); 2272 2273 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ 2274 qh->addr_reg = (u8) usb_pipedevice(urb->pipe); 2275 2276 /* precompute rxtype/txtype/type0 register */ 2277 type_reg = (qh->type << 4) | qh->epnum; 2278 switch (urb->dev->speed) { 2279 case USB_SPEED_LOW: 2280 type_reg |= 0xc0; 2281 break; 2282 case USB_SPEED_FULL: 2283 type_reg |= 0x80; 2284 break; 2285 default: 2286 type_reg |= 0x40; 2287 } 2288 qh->type_reg = type_reg; 2289 2290 /* Precompute RXINTERVAL/TXINTERVAL register */ 2291 switch (qh->type) { 2292 case USB_ENDPOINT_XFER_INT: 2293 /* 2294 * Full/low speeds use the linear encoding, 2295 * high speed uses the logarithmic encoding. 2296 */ 2297 if (urb->dev->speed <= USB_SPEED_FULL) { 2298 interval = max_t(u8, epd->bInterval, 1); 2299 break; 2300 } 2301 /* FALLTHROUGH */ 2302 case USB_ENDPOINT_XFER_ISOC: 2303 /* ISO always uses logarithmic encoding */ 2304 interval = min_t(u8, epd->bInterval, 16); 2305 break; 2306 default: 2307 /* REVISIT we actually want to use NAK limits, hinting to the 2308 * transfer scheduling logic to try some other qh, e.g. try 2309 * for 2 msec first: 2310 * 2311 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; 2312 * 2313 * The downside of disabling this is that transfer scheduling 2314 * gets VERY unfair for nonperiodic transfers; a misbehaving 2315 * peripheral could make that hurt. That's perfectly normal 2316 * for reads from network or serial adapters ... so we have 2317 * partial NAKlimit support for bulk RX. 2318 * 2319 * The upside of disabling it is simpler transfer scheduling. 2320 */ 2321 interval = 0; 2322 } 2323 qh->intv_reg = interval; 2324 2325 /* precompute addressing for external hub/tt ports */ 2326 if (musb->is_multipoint) { 2327 struct usb_device *parent = urb->dev->parent; 2328 2329 if (parent != hcd->self.root_hub) { 2330 qh->h_addr_reg = (u8) parent->devnum; 2331 2332 /* set up tt info if needed */ 2333 if (urb->dev->tt) { 2334 qh->h_port_reg = (u8) urb->dev->ttport; 2335 if (urb->dev->tt->hub) 2336 qh->h_addr_reg = 2337 (u8) urb->dev->tt->hub->devnum; 2338 if (urb->dev->tt->multi) 2339 qh->h_addr_reg |= 0x80; 2340 } 2341 } 2342 } 2343 2344 /* invariant: hep->hcpriv is null OR the qh that's already scheduled. 2345 * until we get real dma queues (with an entry for each urb/buffer), 2346 * we only have work to do in the former case. 2347 */ 2348 spin_lock_irqsave(&musb->lock, flags); 2349 if (hep->hcpriv || !next_urb(qh)) { 2350 /* some concurrent activity submitted another urb to hep... 2351 * odd, rare, error prone, but legal. 2352 */ 2353 kfree(qh); 2354 qh = NULL; 2355 ret = 0; 2356 } else 2357 ret = musb_schedule(musb, qh, 2358 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); 2359 2360 if (ret == 0) { 2361 urb->hcpriv = qh; 2362 /* FIXME set urb->start_frame for iso/intr, it's tested in 2363 * musb_start_urb(), but otherwise only konicawc cares ... 
2364 */ 2365 } 2366 spin_unlock_irqrestore(&musb->lock, flags); 2367 2368 done: 2369 if (ret != 0) { 2370 spin_lock_irqsave(&musb->lock, flags); 2371 usb_hcd_unlink_urb_from_ep(hcd, urb); 2372 spin_unlock_irqrestore(&musb->lock, flags); 2373 kfree(qh); 2374 } 2375 return ret; 2376 } 2377 2378 2379 /* 2380 * abort a transfer that's at the head of a hardware queue. 2381 * called with controller locked, irqs blocked 2382 * that hardware queue advances to the next transfer, unless prevented 2383 */ 2384 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) 2385 { 2386 struct musb_hw_ep *ep = qh->hw_ep; 2387 struct musb *musb = ep->musb; 2388 void __iomem *epio = ep->regs; 2389 unsigned hw_end = ep->epnum; 2390 void __iomem *regs = ep->musb->mregs; 2391 int is_in = usb_pipein(urb->pipe); 2392 int status = 0; 2393 u16 csr; 2394 2395 musb_ep_select(regs, hw_end); 2396 2397 if (is_dma_capable()) { 2398 struct dma_channel *dma; 2399 2400 dma = is_in ? ep->rx_channel : ep->tx_channel; 2401 if (dma) { 2402 status = ep->musb->dma_controller->channel_abort(dma); 2403 dev_dbg(musb->controller, 2404 "abort %cX%d DMA for urb %p --> %d\n", 2405 is_in ? 'R' : 'T', ep->epnum, 2406 urb, status); 2407 urb->actual_length += dma->actual_len; 2408 } 2409 } 2410 2411 /* turn off DMA requests, discard state, stop polling ... */ 2412 if (ep->epnum && is_in) { 2413 /* giveback saves bulk toggle */ 2414 csr = musb_h_flush_rxfifo(ep, 0); 2415 2416 /* REVISIT we still get an irq; should likely clear the 2417 * endpoint's irq status here to avoid bogus irqs. 2418 * clearing that status is platform-specific... 2419 */ 2420 } else if (ep->epnum) { 2421 musb_h_tx_flush_fifo(ep); 2422 csr = musb_readw(epio, MUSB_TXCSR); 2423 csr &= ~(MUSB_TXCSR_AUTOSET 2424 | MUSB_TXCSR_DMAENAB 2425 | MUSB_TXCSR_H_RXSTALL 2426 | MUSB_TXCSR_H_NAKTIMEOUT 2427 | MUSB_TXCSR_H_ERROR 2428 | MUSB_TXCSR_TXPKTRDY); 2429 musb_writew(epio, MUSB_TXCSR, csr); 2430 /* REVISIT may need to clear FLUSHFIFO ... */ 2431 musb_writew(epio, MUSB_TXCSR, csr); 2432 /* flush cpu writebuffer */ 2433 csr = musb_readw(epio, MUSB_TXCSR); 2434 } else { 2435 musb_h_ep0_flush_fifo(ep); 2436 } 2437 if (status == 0) 2438 musb_advance_schedule(ep->musb, urb, ep, is_in); 2439 return status; 2440 } 2441 2442 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) 2443 { 2444 struct musb *musb = hcd_to_musb(hcd); 2445 struct musb_qh *qh; 2446 unsigned long flags; 2447 int is_in = usb_pipein(urb->pipe); 2448 int ret; 2449 2450 dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, 2451 usb_pipedevice(urb->pipe), 2452 usb_pipeendpoint(urb->pipe), 2453 is_in ? "in" : "out"); 2454 2455 spin_lock_irqsave(&musb->lock, flags); 2456 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 2457 if (ret) 2458 goto done; 2459 2460 qh = urb->hcpriv; 2461 if (!qh) 2462 goto done; 2463 2464 /* 2465 * Any URB not actively programmed into endpoint hardware can be 2466 * immediately given back; that's any URB not at the head of an 2467 * endpoint queue, unless someday we get real DMA queues. And even 2468 * if it's at the head, it might not be known to the hardware... 2469 * 2470 * Otherwise abort current transfer, pending DMA, etc.; urb->status 2471 * has already been updated. This is a synchronous abort; it'd be 2472 * OK to hold off until after some IRQ, though. 
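 *
 * In code terms the urb is still "live" in hardware only when all
 * three of these hold: qh->is_ready, the urb sits at the head of
 * hep->urb_list, and this qh is the one currently programmed into its
 * hardware endpoint (musb_ep_get_qh(qh->hw_ep, is_in) == qh); the
 * test below simply gives back everything else.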
2473 * 2474 * NOTE: qh is invalid unless !list_empty(&hep->urb_list) 2475 */ 2476 if (!qh->is_ready 2477 || urb->urb_list.prev != &qh->hep->urb_list 2478 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { 2479 int ready = qh->is_ready; 2480 2481 qh->is_ready = 0; 2482 musb_giveback(musb, urb, 0); 2483 qh->is_ready = ready; 2484 2485 /* If nothing else (usually musb_giveback) is using it 2486 * and its URB list has emptied, recycle this qh. 2487 */ 2488 if (ready && list_empty(&qh->hep->urb_list)) { 2489 qh->hep->hcpriv = NULL; 2490 list_del(&qh->ring); 2491 kfree(qh); 2492 } 2493 } else 2494 ret = musb_cleanup_urb(urb, qh); 2495 done: 2496 spin_unlock_irqrestore(&musb->lock, flags); 2497 return ret; 2498 } 2499 2500 /* disable an endpoint */ 2501 static void 2502 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) 2503 { 2504 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; 2505 unsigned long flags; 2506 struct musb *musb = hcd_to_musb(hcd); 2507 struct musb_qh *qh; 2508 struct urb *urb; 2509 2510 spin_lock_irqsave(&musb->lock, flags); 2511 2512 qh = hep->hcpriv; 2513 if (qh == NULL) 2514 goto exit; 2515 2516 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ 2517 2518 /* Kick the first URB off the hardware, if needed */ 2519 qh->is_ready = 0; 2520 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { 2521 urb = next_urb(qh); 2522 2523 /* make software (then hardware) stop ASAP */ 2524 if (!urb->unlinked) 2525 urb->status = -ESHUTDOWN; 2526 2527 /* cleanup */ 2528 musb_cleanup_urb(urb, qh); 2529 2530 /* Then nuke all the others ... and advance the 2531 * queue on hw_ep (e.g. bulk ring) when we're done. 2532 */ 2533 while (!list_empty(&hep->urb_list)) { 2534 urb = next_urb(qh); 2535 urb->status = -ESHUTDOWN; 2536 musb_advance_schedule(musb, urb, qh->hw_ep, is_in); 2537 } 2538 } else { 2539 /* Just empty the queue; the hardware is busy with 2540 * other transfers, and since !qh->is_ready nothing 2541 * will activate any of these as it advances. 2542 */ 2543 while (!list_empty(&hep->urb_list)) 2544 musb_giveback(musb, next_urb(qh), -ESHUTDOWN); 2545 2546 hep->hcpriv = NULL; 2547 list_del(&qh->ring); 2548 kfree(qh); 2549 } 2550 exit: 2551 spin_unlock_irqrestore(&musb->lock, flags); 2552 } 2553 2554 static int musb_h_get_frame_number(struct usb_hcd *hcd) 2555 { 2556 struct musb *musb = hcd_to_musb(hcd); 2557 2558 return musb_readw(musb->mregs, MUSB_FRAME); 2559 } 2560 2561 static int musb_h_start(struct usb_hcd *hcd) 2562 { 2563 struct musb *musb = hcd_to_musb(hcd); 2564 2565 /* NOTE: musb_start() is called when the hub driver turns 2566 * on port power, or when (OTG) peripheral starts. 2567 */ 2568 hcd->state = HC_STATE_RUNNING; 2569 musb->port1_status = 0; 2570 return 0; 2571 } 2572 2573 static void musb_h_stop(struct usb_hcd *hcd) 2574 { 2575 musb_stop(hcd_to_musb(hcd)); 2576 hcd->state = HC_STATE_HALT; 2577 } 2578 2579 static int musb_bus_suspend(struct usb_hcd *hcd) 2580 { 2581 struct musb *musb = hcd_to_musb(hcd); 2582 u8 devctl; 2583 2584 musb_port_suspend(musb, true); 2585 2586 if (!is_host_active(musb)) 2587 return 0; 2588 2589 switch (musb->xceiv->otg->state) { 2590 case OTG_STATE_A_SUSPEND: 2591 return 0; 2592 case OTG_STATE_A_WAIT_VRISE: 2593 /* ID could be grounded even if there's no device 2594 * on the other end of the cable. NOTE that the 2595 * A_WAIT_VRISE timers are messy with MUSB... 
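 *
 * (The check below reads the two-bit MUSB_DEVCTL_VBUS field; only
 * its maximum value -- both bits set -- means VBUS is above the
 * VBus-valid threshold, so the state can move on to A_WAIT_BCON.)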
2596 */ 2597 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 2598 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 2599 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; 2600 break; 2601 default: 2602 break; 2603 } 2604 2605 if (musb->is_active) { 2606 WARNING("trying to suspend as %s while active\n", 2607 usb_otg_state_string(musb->xceiv->otg->state)); 2608 return -EBUSY; 2609 } else 2610 return 0; 2611 } 2612 2613 static int musb_bus_resume(struct usb_hcd *hcd) 2614 { 2615 struct musb *musb = hcd_to_musb(hcd); 2616 2617 if (musb->config && 2618 musb->config->host_port_deassert_reset_at_resume) 2619 musb_port_reset(musb, false); 2620 2621 return 0; 2622 } 2623 2624 #ifndef CONFIG_MUSB_PIO_ONLY 2625 2626 #define MUSB_USB_DMA_ALIGN 4 2627 2628 struct musb_temp_buffer { 2629 void *kmalloc_ptr; 2630 void *old_xfer_buffer; 2631 u8 data[0]; 2632 }; 2633 2634 static void musb_free_temp_buffer(struct urb *urb) 2635 { 2636 enum dma_data_direction dir; 2637 struct musb_temp_buffer *temp; 2638 size_t length; 2639 2640 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2641 return; 2642 2643 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2644 2645 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, 2646 data); 2647 2648 if (dir == DMA_FROM_DEVICE) { 2649 if (usb_pipeisoc(urb->pipe)) 2650 length = urb->transfer_buffer_length; 2651 else 2652 length = urb->actual_length; 2653 2654 memcpy(temp->old_xfer_buffer, temp->data, length); 2655 } 2656 urb->transfer_buffer = temp->old_xfer_buffer; 2657 kfree(temp->kmalloc_ptr); 2658 2659 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2660 } 2661 2662 static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) 2663 { 2664 enum dma_data_direction dir; 2665 struct musb_temp_buffer *temp; 2666 void *kmalloc_ptr; 2667 size_t kmalloc_size; 2668 2669 if (urb->num_sgs || urb->sg || 2670 urb->transfer_buffer_length == 0 || 2671 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) 2672 return 0; 2673 2674 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2675 2676 /* Allocate a buffer with enough padding for alignment */ 2677 kmalloc_size = urb->transfer_buffer_length + 2678 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; 2679 2680 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2681 if (!kmalloc_ptr) 2682 return -ENOMEM; 2683 2684 /* Position our struct musb_temp_buffer so that data is aligned */ 2685 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); 2686 2687 2688 temp->kmalloc_ptr = kmalloc_ptr; 2689 temp->old_xfer_buffer = urb->transfer_buffer; 2690 if (dir == DMA_TO_DEVICE) 2691 memcpy(temp->data, urb->transfer_buffer, 2692 urb->transfer_buffer_length); 2693 urb->transfer_buffer = temp->data; 2694 2695 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2696 2697 return 0; 2698 } 2699 2700 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, 2701 gfp_t mem_flags) 2702 { 2703 struct musb *musb = hcd_to_musb(hcd); 2704 int ret; 2705 2706 /* 2707 * The DMA engine in RTL1.8 and above cannot handle 2708 * DMA addresses that are not aligned to a 4-byte boundary. 2709 * For such engines we implement the (un)map_urb_for_dma hooks.
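 *
 * E.g. (made-up address): an urb->transfer_buffer ending in ...2
 * fails the (uintptr_t)buf & 3 test in musb_alloc_temp_buffer()
 * above, which then kmallocs transfer_buffer_length +
 * sizeof(struct musb_temp_buffer) + 3 bytes, uses PTR_ALIGN() so
 * that temp->data lands on a 4-byte boundary, and points
 * urb->transfer_buffer at temp->data -- copying the payload in now
 * for OUT transfers, and back out on unmap for IN transfers.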
2710 * Do not use these hooks for RTL<1.8 2711 */ 2712 if (musb->hwvers < MUSB_HWVERS_1800) 2713 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 2714 2715 ret = musb_alloc_temp_buffer(urb, mem_flags); 2716 if (ret) 2717 return ret; 2718 2719 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); 2720 if (ret) 2721 musb_free_temp_buffer(urb); 2722 2723 return ret; 2724 } 2725 2726 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) 2727 { 2728 struct musb *musb = hcd_to_musb(hcd); 2729 2730 usb_hcd_unmap_urb_for_dma(hcd, urb); 2731 2732 /* Do not use this hook for RTL<1.8 (see description above) */ 2733 if (musb->hwvers < MUSB_HWVERS_1800) 2734 return; 2735 2736 musb_free_temp_buffer(urb); 2737 } 2738 #endif /* !CONFIG_MUSB_PIO_ONLY */ 2739 2740 static const struct hc_driver musb_hc_driver = { 2741 .description = "musb-hcd", 2742 .product_desc = "MUSB HDRC host driver", 2743 .hcd_priv_size = sizeof(struct musb *), 2744 .flags = HCD_USB2 | HCD_MEMORY, 2745 2746 /* not using irq handler or reset hooks from usbcore, since 2747 * those must be shared with peripheral code for OTG configs 2748 */ 2749 2750 .start = musb_h_start, 2751 .stop = musb_h_stop, 2752 2753 .get_frame_number = musb_h_get_frame_number, 2754 2755 .urb_enqueue = musb_urb_enqueue, 2756 .urb_dequeue = musb_urb_dequeue, 2757 .endpoint_disable = musb_h_disable, 2758 2759 #ifndef CONFIG_MUSB_PIO_ONLY 2760 .map_urb_for_dma = musb_map_urb_for_dma, 2761 .unmap_urb_for_dma = musb_unmap_urb_for_dma, 2762 #endif 2763 2764 .hub_status_data = musb_hub_status_data, 2765 .hub_control = musb_hub_control, 2766 .bus_suspend = musb_bus_suspend, 2767 .bus_resume = musb_bus_resume, 2768 /* .start_port_reset = NULL, */ 2769 /* .hub_irq_enable = NULL, */ 2770 }; 2771 2772 int musb_host_alloc(struct musb *musb) 2773 { 2774 struct device *dev = musb->controller; 2775 2776 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ 2777 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); 2778 if (!musb->hcd) 2779 return -EINVAL; 2780 2781 *musb->hcd->hcd_priv = (unsigned long) musb; 2782 musb->hcd->self.uses_pio_for_control = 1; 2783 musb->hcd->uses_new_polling = 1; 2784 musb->hcd->has_tt = 1; 2785 2786 return 0; 2787 } 2788 2789 void musb_host_cleanup(struct musb *musb) 2790 { 2791 if (musb->port_mode == MUSB_PORT_MODE_GADGET) 2792 return; 2793 usb_remove_hcd(musb->hcd); 2794 } 2795 2796 void musb_host_free(struct musb *musb) 2797 { 2798 usb_put_hcd(musb->hcd); 2799 } 2800 2801 int musb_host_setup(struct musb *musb, int power_budget) 2802 { 2803 int ret; 2804 struct usb_hcd *hcd = musb->hcd; 2805 2806 MUSB_HST_MODE(musb); 2807 musb->xceiv->otg->default_a = 1; 2808 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2809 2810 otg_set_host(musb->xceiv->otg, &hcd->self); 2811 hcd->self.otg_port = 1; 2812 musb->xceiv->otg->host = &hcd->self; 2813 hcd->power_budget = 2 * (power_budget ? : 250); 2814 2815 ret = usb_add_hcd(hcd, 0, 0); 2816 if (ret < 0) 2817 return ret; 2818 2819 device_wakeup_enable(hcd->self.controller); 2820 return 0; 2821 } 2822 2823 void musb_host_resume_root_hub(struct musb *musb) 2824 { 2825 usb_hcd_resume_root_hub(musb->hcd); 2826 } 2827 2828 void musb_host_poke_root_hub(struct musb *musb) 2829 { 2830 MUSB_HST_MODE(musb); 2831 if (musb->hcd->status_urb) 2832 usb_hcd_poll_rh_status(musb->hcd); 2833 else 2834 usb_hcd_resume_root_hub(musb->hcd); 2835 } 2836
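/*
 * Sketch of how platform glue code is expected to drive the entry
 * points above (hedged pseudo-usage, not copied from any real glue
 * driver; error handling and probe/remove plumbing omitted):
 *
 *	err = musb_host_alloc(musb);		// create musb->hcd
 *	...
 *	err = musb_host_setup(musb, power);	// usb_add_hcd(); passing
 *						// power == 0 yields the
 *						// default 2 * 250 budget
 *	...
 *	musb_host_cleanup(musb);	// usb_remove_hcd(), host modes only
 *	musb_host_free(musb);		// drop the hcd reference
 */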