/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"


/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but saw USBCV failures
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&request->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);
	if (is_dma_capable()) {
		if (req->mapped) {
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else if (req->request.dma != DMA_ADDR_INVALID)
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (request->status == 0)
		DBG(5, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * Caller has locked the controller, blocked IRQs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&(ep->req_list))) {
		req = container_of(ep->req_list.next, struct musb_request,
				request.list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |	(data is transferred to the FIFO, then sent out when
		  |	IN token(s) are recd from Host.
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
		  |			-> stop DMA, ~DMAENAB,
		  |			-> set TxPktRdy for last short pkt or zlp
		  |			-> Complete Request
		  |			-> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_MUSB_PIO_ONLY
	if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#ifdef CONFIG_USB_INVENTRA_DMA
		{
			size_t request_size;

			/* setup DMA, then program endpoint CSR */
			request_size = min(request->length,
						musb_ep->dma->max_len);
			if (request_size <= musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else
					csr |= (MUSB_TXCSR_AUTOSET
							| MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);

				csr &= ~MUSB_TXCSR_P_UNDERRUN;
				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma,
				request->length);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma,
				request->length);
#endif
	}
#endif

	if (!use_dma) {
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;
	do {
		/* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
		 * probably rates reporting as a host error
		 */
		if (csr & MUSB_TXCSR_P_SENTSTALL) {
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~MUSB_TXCSR_P_SENTSTALL;
			musb_writew(epio, MUSB_TXCSR, csr);
			if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
				dma->status = MUSB_DMA_STATUS_CORE_ABORT;
				musb->dma_controller->channel_abort(dma);
			}

			if (request)
				musb_g_giveback(musb_ep, request, -EPIPE);

			break;
		}

		if (csr & MUSB_TXCSR_P_UNDERRUN) {
			/* we NAKed, no big deal ... little reason to care */
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_P_UNDERRUN
					| MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR, csr);
			DBG(20, "underrun on ep%d, req %p\n", epnum, request);
		}

		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			/* SHOULD NOT HAPPEN ... has with cppi though, after
			 * changing SENDSTALL (and other cases); harmless?
			 */
			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
			break;
		}

		if (request) {
			u8	is_dma = 0;

			if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
				is_dma = 1;
				csr |= MUSB_TXCSR_P_WZC_BITS;
				csr &= ~(MUSB_TXCSR_DMAENAB
						| MUSB_TXCSR_P_UNDERRUN
						| MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR, csr);
				/* ensure writebuffer is empty */
				csr = musb_readw(epio, MUSB_TXCSR);
				request->actual += musb_ep->dma->actual_len;
				DBG(4, "TXCSR%d %04x, dma off, "
						"len %zu, req %p\n",
					epnum, csr,
					musb_ep->dma->actual_len,
					request);
			}

			if (is_dma || request->actual == request->length) {

				/* First, maybe a terminating short packet.
				 * Some DMA engines might handle this by
				 * themselves.
				 */
				if ((request->zero
						&& request->length
						&& (request->length
							% musb_ep->packet_sz)
							== 0)
#ifdef CONFIG_USB_INVENTRA_DMA
					|| (is_dma &&
						((!dma->desired_mode) ||
						    (request->actual &
						    (musb_ep->packet_sz - 1))))
#endif
				) {
					/* on dma completion, fifo may not
					 * be available yet ...
					 */
					if (csr & MUSB_TXCSR_TXPKTRDY)
						break;

					DBG(4, "sending zero pkt\n");
					musb_writew(epio, MUSB_TXCSR,
							MUSB_TXCSR_MODE
							| MUSB_TXCSR_TXPKTRDY);
					request->zero = 0;
				}

				/* ... or if not, then complete it */
				musb_g_giveback(musb_ep, request, 0);

				/* kickstart next transfer if appropriate;
				 * the packet that just completed might not
				 * be transmitted for hours or days.
				 * REVISIT for double buffering...
				 * FIXME revisit for stalls too...
				 */
				musb_ep_select(mbase, epnum);
				csr = musb_readw(epio, MUSB_TXCSR);
				if (csr & MUSB_TXCSR_FIFONOTEMPTY)
					break;
				request = musb_ep->desc
						? next_request(musb_ep)
						: NULL;
				if (!request) {
					DBG(4, "%s idle now\n",
						musb_ep->end_point.name);
					break;
				}
			}

			txstate(musb, to_musb_request(request));
		}

	} while (0);
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\	-> RxReady
	  |	    -> if request queued, call rxstate
	  |		/\	-> setup DMA
	  |		|	-> DMA interrupt on completion
	  |		|	   -> RxReady
	  |		|		-> stop DMA
	  |		|		-> ack the read
	  |		|		-> if data recd = max expected
	  |		|			by the request, or host
	  |		|			sent a short packet,
	  |		|			complete the request,
	  |		|			and start the next one.
	  |		|_____________________________________|
	  |				 else just wait for the host
	  |				    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	u16			csr = 0;
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len = musb_ep->packet_sz;

	csr = musb_readw(epio, MUSB_RXCSR);

	if (is_cppi_enabled() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);
		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_dma_capable() && musb_ep->dma) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of transfer is signified either by a
	 * short packet, or filling the last byte of the buffer.  (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				csr |= MUSB_RXCSR_DMAENAB;
#ifdef USE_MODE1
				csr |= MUSB_RXCSR_AUTOCLEAR;
				/* csr |= MUSB_RXCSR_DMAMODE; */

				/* this special sequence (enabling and then
				 * disabling MUSB_RXCSR_DMAMODE) is required
				 * to get DMAReq to activate
				 */
				musb_writew(epio, MUSB_RXCSR,
					csr | MUSB_RXCSR_DMAMODE);
#endif
				musb_writew(epio, MUSB_RXCSR, csr);

				if (request->actual < request->length) {
					int transfer_size = 0;
#ifdef USE_MODE1
					transfer_size = min(request->length,
							channel->max_len);
#else
					transfer_size = len;
#endif
					if (transfer_size <= musb_ep->packet_sz)
						musb_ep->dma->desired_mode = 0;
					else
						musb_ep->dma->desired_mode = 1;

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && musb_ep->dma) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			request->actual += musb_ep->dma->actual_len;
		}

		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);

		if (request)
			musb_g_giveback(musb_ep, request, -EPIPE);
		goto done;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request && request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
			"%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		goto done;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz))
			goto done;
#endif
		musb_g_giveback(musb_ep, request, 0);

		request = next_request(musb_ep);
		if (!request)
			goto done;

		/* don't start more i/o till the stall clears */
		musb_ep_select(mbase, epnum);
		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_P_SENDSTALL)
			goto done;
	}

	/* analyze request if the ep is hot */
	if (request)
		rxstate(musb, to_musb_request(request));
	else
		DBG(3, "packet waiting for %s%s request\n",
				musb_ep->desc ? "" : "inactive ",
				musb_ep->end_point.name);

done:
	return;
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	if (tmp & ~0x07ff)
		goto fail;
	musb_ep->packet_sz = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_tx)
			goto fail;

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		musb_writew(regs, MUSB_TXMAXP, tmp);

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_rx)
			goto fail;

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		musb_writew(regs, MUSB_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint, flushing all queued requests.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	DBG(2, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (request) {
		INIT_LIST_HEAD(&request->request.list);
		request->request.dma = DMA_ADDR_INVALID;
		request->epnum = musb_ep->current_epnum;
		request->ep = musb_ep;
	}

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	DBG(4, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
*/ 1130550a7375SFelipe Balbi request->request.actual = 0; 1131550a7375SFelipe Balbi request->request.status = -EINPROGRESS; 1132550a7375SFelipe Balbi request->epnum = musb_ep->current_epnum; 1133550a7375SFelipe Balbi request->tx = musb_ep->is_in; 1134550a7375SFelipe Balbi 1135550a7375SFelipe Balbi if (is_dma_capable() && musb_ep->dma) { 1136550a7375SFelipe Balbi if (request->request.dma == DMA_ADDR_INVALID) { 1137550a7375SFelipe Balbi request->request.dma = dma_map_single( 1138550a7375SFelipe Balbi musb->controller, 1139550a7375SFelipe Balbi request->request.buf, 1140550a7375SFelipe Balbi request->request.length, 1141550a7375SFelipe Balbi request->tx 1142550a7375SFelipe Balbi ? DMA_TO_DEVICE 1143550a7375SFelipe Balbi : DMA_FROM_DEVICE); 1144550a7375SFelipe Balbi request->mapped = 1; 1145550a7375SFelipe Balbi } else { 1146550a7375SFelipe Balbi dma_sync_single_for_device(musb->controller, 1147550a7375SFelipe Balbi request->request.dma, 1148550a7375SFelipe Balbi request->request.length, 1149550a7375SFelipe Balbi request->tx 1150550a7375SFelipe Balbi ? DMA_TO_DEVICE 1151550a7375SFelipe Balbi : DMA_FROM_DEVICE); 1152550a7375SFelipe Balbi request->mapped = 0; 1153550a7375SFelipe Balbi } 1154550a7375SFelipe Balbi } else if (!req->buf) { 1155550a7375SFelipe Balbi return -ENODATA; 1156550a7375SFelipe Balbi } else 1157550a7375SFelipe Balbi request->mapped = 0; 1158550a7375SFelipe Balbi 1159550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, lockflags); 1160550a7375SFelipe Balbi 1161550a7375SFelipe Balbi /* don't queue if the ep is down */ 1162550a7375SFelipe Balbi if (!musb_ep->desc) { 1163550a7375SFelipe Balbi DBG(4, "req %p queued to %s while ep %s\n", 1164550a7375SFelipe Balbi req, ep->name, "disabled"); 1165550a7375SFelipe Balbi status = -ESHUTDOWN; 1166550a7375SFelipe Balbi goto cleanup; 1167550a7375SFelipe Balbi } 1168550a7375SFelipe Balbi 1169550a7375SFelipe Balbi /* add request to the list */ 1170550a7375SFelipe Balbi list_add_tail(&(request->request.list), &(musb_ep->req_list)); 1171550a7375SFelipe Balbi 1172550a7375SFelipe Balbi /* if this is the head of the queue, start i/o ... 
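 * the hardware is kicked right away only when the endpoint is idle and
 * this request sits at the head of req_list; otherwise the interrupt/DMA
 * completion path picks it up once earlier requests finish.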
*/ 1173550a7375SFelipe Balbi if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) 1174550a7375SFelipe Balbi musb_ep_restart(musb, request); 1175550a7375SFelipe Balbi 1176550a7375SFelipe Balbi cleanup: 1177550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, lockflags); 1178550a7375SFelipe Balbi return status; 1179550a7375SFelipe Balbi } 1180550a7375SFelipe Balbi 1181550a7375SFelipe Balbi static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) 1182550a7375SFelipe Balbi { 1183550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1184550a7375SFelipe Balbi struct usb_request *r; 1185550a7375SFelipe Balbi unsigned long flags; 1186550a7375SFelipe Balbi int status = 0; 1187550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1188550a7375SFelipe Balbi 1189550a7375SFelipe Balbi if (!ep || !request || to_musb_request(request)->ep != musb_ep) 1190550a7375SFelipe Balbi return -EINVAL; 1191550a7375SFelipe Balbi 1192550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1193550a7375SFelipe Balbi 1194550a7375SFelipe Balbi list_for_each_entry(r, &musb_ep->req_list, list) { 1195550a7375SFelipe Balbi if (r == request) 1196550a7375SFelipe Balbi break; 1197550a7375SFelipe Balbi } 1198550a7375SFelipe Balbi if (r != request) { 1199550a7375SFelipe Balbi DBG(3, "request %p not queued to %s\n", request, ep->name); 1200550a7375SFelipe Balbi status = -EINVAL; 1201550a7375SFelipe Balbi goto done; 1202550a7375SFelipe Balbi } 1203550a7375SFelipe Balbi 1204550a7375SFelipe Balbi /* if the hardware doesn't have the request, easy ... */ 1205550a7375SFelipe Balbi if (musb_ep->req_list.next != &request->list || musb_ep->busy) 1206550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1207550a7375SFelipe Balbi 1208550a7375SFelipe Balbi /* ... else abort the dma transfer ... */ 1209550a7375SFelipe Balbi else if (is_dma_capable() && musb_ep->dma) { 1210550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 1211550a7375SFelipe Balbi 1212550a7375SFelipe Balbi musb_ep_select(musb->mregs, musb_ep->current_epnum); 1213550a7375SFelipe Balbi if (c->channel_abort) 1214550a7375SFelipe Balbi status = c->channel_abort(musb_ep->dma); 1215550a7375SFelipe Balbi else 1216550a7375SFelipe Balbi status = -EBUSY; 1217550a7375SFelipe Balbi if (status == 0) 1218550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1219550a7375SFelipe Balbi } else { 1220550a7375SFelipe Balbi /* NOTE: by sticking to easily tested hardware/driver states, 1221550a7375SFelipe Balbi * we leave counting of in-flight packets imprecise. 1222550a7375SFelipe Balbi */ 1223550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1224550a7375SFelipe Balbi } 1225550a7375SFelipe Balbi 1226550a7375SFelipe Balbi done: 1227550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1228550a7375SFelipe Balbi return status; 1229550a7375SFelipe Balbi } 1230550a7375SFelipe Balbi 1231550a7375SFelipe Balbi /* 1232550a7375SFelipe Balbi * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any 1233550a7375SFelipe Balbi * data but will queue requests. 
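 *
 * Function drivers reach this through the usb_ep_set_halt() and
 * usb_ep_clear_halt() wrappers.  A minimal, purely illustrative use
 * (none of these names are defined in this file):
 *
 *	status = usb_ep_set_halt(my_ep);
 *	if (status == -EAGAIN)
 *		status = 0;	(an IN FIFO is still draining; retry later)
 *
 * As the code below shows, halting an IN endpoint whose FIFO is not
 * empty is refused with -EAGAIN.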
1234550a7375SFelipe Balbi * 1235550a7375SFelipe Balbi * exported to ep0 code 1236550a7375SFelipe Balbi */ 1237550a7375SFelipe Balbi int musb_gadget_set_halt(struct usb_ep *ep, int value) 1238550a7375SFelipe Balbi { 1239550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1240550a7375SFelipe Balbi u8 epnum = musb_ep->current_epnum; 1241550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1242550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 1243550a7375SFelipe Balbi void __iomem *mbase; 1244550a7375SFelipe Balbi unsigned long flags; 1245550a7375SFelipe Balbi u16 csr; 1246550a7375SFelipe Balbi struct musb_request *request = NULL; 1247550a7375SFelipe Balbi int status = 0; 1248550a7375SFelipe Balbi 1249550a7375SFelipe Balbi if (!ep) 1250550a7375SFelipe Balbi return -EINVAL; 1251550a7375SFelipe Balbi mbase = musb->mregs; 1252550a7375SFelipe Balbi 1253550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1254550a7375SFelipe Balbi 1255550a7375SFelipe Balbi if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { 1256550a7375SFelipe Balbi status = -EINVAL; 1257550a7375SFelipe Balbi goto done; 1258550a7375SFelipe Balbi } 1259550a7375SFelipe Balbi 1260550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 1261550a7375SFelipe Balbi 1262550a7375SFelipe Balbi /* cannot portably stall with non-empty FIFO */ 1263550a7375SFelipe Balbi request = to_musb_request(next_request(musb_ep)); 1264550a7375SFelipe Balbi if (value && musb_ep->is_in) { 1265550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 1266550a7375SFelipe Balbi if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1267550a7375SFelipe Balbi DBG(3, "%s fifo busy, cannot halt\n", ep->name); 1268550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1269550a7375SFelipe Balbi return -EAGAIN; 1270550a7375SFelipe Balbi } 1271550a7375SFelipe Balbi 1272550a7375SFelipe Balbi } 1273550a7375SFelipe Balbi 1274550a7375SFelipe Balbi /* set/clear the stall and toggle bits */ 1275550a7375SFelipe Balbi DBG(2, "%s: %s stall\n", ep->name, value ? 
"set" : "clear"); 1276550a7375SFelipe Balbi if (musb_ep->is_in) { 1277550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 1278550a7375SFelipe Balbi if (csr & MUSB_TXCSR_FIFONOTEMPTY) 1279550a7375SFelipe Balbi csr |= MUSB_TXCSR_FLUSHFIFO; 1280550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS 1281550a7375SFelipe Balbi | MUSB_TXCSR_CLRDATATOG; 1282550a7375SFelipe Balbi if (value) 1283550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_SENDSTALL; 1284550a7375SFelipe Balbi else 1285550a7375SFelipe Balbi csr &= ~(MUSB_TXCSR_P_SENDSTALL 1286550a7375SFelipe Balbi | MUSB_TXCSR_P_SENTSTALL); 1287550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_TXPKTRDY; 1288550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 1289550a7375SFelipe Balbi } else { 1290550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 1291550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS 1292550a7375SFelipe Balbi | MUSB_RXCSR_FLUSHFIFO 1293550a7375SFelipe Balbi | MUSB_RXCSR_CLRDATATOG; 1294550a7375SFelipe Balbi if (value) 1295550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_SENDSTALL; 1296550a7375SFelipe Balbi else 1297550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_P_SENDSTALL 1298550a7375SFelipe Balbi | MUSB_RXCSR_P_SENTSTALL); 1299550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1300550a7375SFelipe Balbi } 1301550a7375SFelipe Balbi 1302550a7375SFelipe Balbi done: 1303550a7375SFelipe Balbi 1304550a7375SFelipe Balbi /* maybe start the first request in the queue */ 1305550a7375SFelipe Balbi if (!musb_ep->busy && !value && request) { 1306550a7375SFelipe Balbi DBG(3, "restarting the request\n"); 1307550a7375SFelipe Balbi musb_ep_restart(musb, request); 1308550a7375SFelipe Balbi } 1309550a7375SFelipe Balbi 1310550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1311550a7375SFelipe Balbi return status; 1312550a7375SFelipe Balbi } 1313550a7375SFelipe Balbi 1314550a7375SFelipe Balbi static int musb_gadget_fifo_status(struct usb_ep *ep) 1315550a7375SFelipe Balbi { 1316550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1317550a7375SFelipe Balbi void __iomem *epio = musb_ep->hw_ep->regs; 1318550a7375SFelipe Balbi int retval = -EINVAL; 1319550a7375SFelipe Balbi 1320550a7375SFelipe Balbi if (musb_ep->desc && !musb_ep->is_in) { 1321550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1322550a7375SFelipe Balbi int epnum = musb_ep->current_epnum; 1323550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 1324550a7375SFelipe Balbi unsigned long flags; 1325550a7375SFelipe Balbi 1326550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1327550a7375SFelipe Balbi 1328550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 1329550a7375SFelipe Balbi /* FIXME return zero unless RXPKTRDY is set */ 1330550a7375SFelipe Balbi retval = musb_readw(epio, MUSB_RXCOUNT); 1331550a7375SFelipe Balbi 1332550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1333550a7375SFelipe Balbi } 1334550a7375SFelipe Balbi return retval; 1335550a7375SFelipe Balbi } 1336550a7375SFelipe Balbi 1337550a7375SFelipe Balbi static void musb_gadget_fifo_flush(struct usb_ep *ep) 1338550a7375SFelipe Balbi { 1339550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1340550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1341550a7375SFelipe Balbi u8 epnum = musb_ep->current_epnum; 1342550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 1343550a7375SFelipe Balbi void __iomem *mbase; 1344550a7375SFelipe Balbi unsigned long flags; 1345550a7375SFelipe Balbi u16 csr, int_txe; 1346550a7375SFelipe Balbi 1347550a7375SFelipe 
Balbi mbase = musb->mregs; 1348550a7375SFelipe Balbi 1349550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1350550a7375SFelipe Balbi musb_ep_select(mbase, (u8) epnum); 1351550a7375SFelipe Balbi 1352550a7375SFelipe Balbi /* disable interrupts */ 1353550a7375SFelipe Balbi int_txe = musb_readw(mbase, MUSB_INTRTXE); 1354550a7375SFelipe Balbi musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); 1355550a7375SFelipe Balbi 1356550a7375SFelipe Balbi if (musb_ep->is_in) { 1357550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 1358550a7375SFelipe Balbi if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1359550a7375SFelipe Balbi csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; 1360550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 1361550a7375SFelipe Balbi /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1362550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 1363550a7375SFelipe Balbi } 1364550a7375SFelipe Balbi } else { 1365550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 1366550a7375SFelipe Balbi csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; 1367550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1368550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1369550a7375SFelipe Balbi } 1370550a7375SFelipe Balbi 1371550a7375SFelipe Balbi /* re-enable interrupt */ 1372550a7375SFelipe Balbi musb_writew(mbase, MUSB_INTRTXE, int_txe); 1373550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1374550a7375SFelipe Balbi } 1375550a7375SFelipe Balbi 1376550a7375SFelipe Balbi static const struct usb_ep_ops musb_ep_ops = { 1377550a7375SFelipe Balbi .enable = musb_gadget_enable, 1378550a7375SFelipe Balbi .disable = musb_gadget_disable, 1379550a7375SFelipe Balbi .alloc_request = musb_alloc_request, 1380550a7375SFelipe Balbi .free_request = musb_free_request, 1381550a7375SFelipe Balbi .queue = musb_gadget_queue, 1382550a7375SFelipe Balbi .dequeue = musb_gadget_dequeue, 1383550a7375SFelipe Balbi .set_halt = musb_gadget_set_halt, 1384550a7375SFelipe Balbi .fifo_status = musb_gadget_fifo_status, 1385550a7375SFelipe Balbi .fifo_flush = musb_gadget_fifo_flush 1386550a7375SFelipe Balbi }; 1387550a7375SFelipe Balbi 1388550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1389550a7375SFelipe Balbi 1390550a7375SFelipe Balbi static int musb_gadget_get_frame(struct usb_gadget *gadget) 1391550a7375SFelipe Balbi { 1392550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1393550a7375SFelipe Balbi 1394550a7375SFelipe Balbi return (int)musb_readw(musb->mregs, MUSB_FRAME); 1395550a7375SFelipe Balbi } 1396550a7375SFelipe Balbi 1397550a7375SFelipe Balbi static int musb_gadget_wakeup(struct usb_gadget *gadget) 1398550a7375SFelipe Balbi { 1399550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1400550a7375SFelipe Balbi void __iomem *mregs = musb->mregs; 1401550a7375SFelipe Balbi unsigned long flags; 1402550a7375SFelipe Balbi int status = -EINVAL; 1403550a7375SFelipe Balbi u8 power, devctl; 1404550a7375SFelipe Balbi int retries; 1405550a7375SFelipe Balbi 1406550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1407550a7375SFelipe Balbi 1408550a7375SFelipe Balbi switch (musb->xceiv.state) { 1409550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1410550a7375SFelipe Balbi /* NOTE: OTG state machine doesn't include B_SUSPENDED; 1411550a7375SFelipe Balbi * that's part of the standard usb 1.1 state machine, and 1412550a7375SFelipe Balbi * doesn't affect OTG transitions. 
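 *
 * Resume signalling is only driven below when the host has enabled
 * remote wakeup (musb->may_wakeup) and the link really is suspended;
 * otherwise this request fails with -EINVAL.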
1413550a7375SFelipe Balbi */ 1414550a7375SFelipe Balbi if (musb->may_wakeup && musb->is_suspended) 1415550a7375SFelipe Balbi break; 1416550a7375SFelipe Balbi goto done; 1417550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1418550a7375SFelipe Balbi /* Start SRP ... OTG not required. */ 1419550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1420550a7375SFelipe Balbi DBG(2, "Sending SRP: devctl: %02x\n", devctl); 1421550a7375SFelipe Balbi devctl |= MUSB_DEVCTL_SESSION; 1422550a7375SFelipe Balbi musb_writeb(mregs, MUSB_DEVCTL, devctl); 1423550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1424550a7375SFelipe Balbi retries = 100; 1425550a7375SFelipe Balbi while (!(devctl & MUSB_DEVCTL_SESSION)) { 1426550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1427550a7375SFelipe Balbi if (retries-- < 1) 1428550a7375SFelipe Balbi break; 1429550a7375SFelipe Balbi } 1430550a7375SFelipe Balbi retries = 10000; 1431550a7375SFelipe Balbi while (devctl & MUSB_DEVCTL_SESSION) { 1432550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1433550a7375SFelipe Balbi if (retries-- < 1) 1434550a7375SFelipe Balbi break; 1435550a7375SFelipe Balbi } 1436550a7375SFelipe Balbi 1437550a7375SFelipe Balbi /* Block idling for at least 1s */ 1438550a7375SFelipe Balbi musb_platform_try_idle(musb, 1439550a7375SFelipe Balbi jiffies + msecs_to_jiffies(1 * HZ)); 1440550a7375SFelipe Balbi 1441550a7375SFelipe Balbi status = 0; 1442550a7375SFelipe Balbi goto done; 1443550a7375SFelipe Balbi default: 1444550a7375SFelipe Balbi DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); 1445550a7375SFelipe Balbi goto done; 1446550a7375SFelipe Balbi } 1447550a7375SFelipe Balbi 1448550a7375SFelipe Balbi status = 0; 1449550a7375SFelipe Balbi 1450550a7375SFelipe Balbi power = musb_readb(mregs, MUSB_POWER); 1451550a7375SFelipe Balbi power |= MUSB_POWER_RESUME; 1452550a7375SFelipe Balbi musb_writeb(mregs, MUSB_POWER, power); 1453550a7375SFelipe Balbi DBG(2, "issue wakeup\n"); 1454550a7375SFelipe Balbi 1455550a7375SFelipe Balbi /* FIXME do this next chunk in a timer callback, no udelay */ 1456550a7375SFelipe Balbi mdelay(2); 1457550a7375SFelipe Balbi 1458550a7375SFelipe Balbi power = musb_readb(mregs, MUSB_POWER); 1459550a7375SFelipe Balbi power &= ~MUSB_POWER_RESUME; 1460550a7375SFelipe Balbi musb_writeb(mregs, MUSB_POWER, power); 1461550a7375SFelipe Balbi done: 1462550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1463550a7375SFelipe Balbi return status; 1464550a7375SFelipe Balbi } 1465550a7375SFelipe Balbi 1466550a7375SFelipe Balbi static int 1467550a7375SFelipe Balbi musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) 1468550a7375SFelipe Balbi { 1469550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1470550a7375SFelipe Balbi 1471550a7375SFelipe Balbi musb->is_self_powered = !!is_selfpowered; 1472550a7375SFelipe Balbi return 0; 1473550a7375SFelipe Balbi } 1474550a7375SFelipe Balbi 1475550a7375SFelipe Balbi static void musb_pullup(struct musb *musb, int is_on) 1476550a7375SFelipe Balbi { 1477550a7375SFelipe Balbi u8 power; 1478550a7375SFelipe Balbi 1479550a7375SFelipe Balbi power = musb_readb(musb->mregs, MUSB_POWER); 1480550a7375SFelipe Balbi if (is_on) 1481550a7375SFelipe Balbi power |= MUSB_POWER_SOFTCONN; 1482550a7375SFelipe Balbi else 1483550a7375SFelipe Balbi power &= ~MUSB_POWER_SOFTCONN; 1484550a7375SFelipe Balbi 1485550a7375SFelipe Balbi /* FIXME if on, HdrcStart; if off, HdrcStop */ 1486550a7375SFelipe Balbi 1487550a7375SFelipe Balbi DBG(3, 
"gadget %s D+ pullup %s\n", 1488550a7375SFelipe Balbi musb->gadget_driver->function, is_on ? "on" : "off"); 1489550a7375SFelipe Balbi musb_writeb(musb->mregs, MUSB_POWER, power); 1490550a7375SFelipe Balbi } 1491550a7375SFelipe Balbi 1492550a7375SFelipe Balbi #if 0 1493550a7375SFelipe Balbi static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) 1494550a7375SFelipe Balbi { 1495550a7375SFelipe Balbi DBG(2, "<= %s =>\n", __func__); 1496550a7375SFelipe Balbi 1497550a7375SFelipe Balbi /* 1498550a7375SFelipe Balbi * FIXME iff driver's softconnect flag is set (as it is during probe, 1499550a7375SFelipe Balbi * though that can clear it), just musb_pullup(). 1500550a7375SFelipe Balbi */ 1501550a7375SFelipe Balbi 1502550a7375SFelipe Balbi return -EINVAL; 1503550a7375SFelipe Balbi } 1504550a7375SFelipe Balbi #endif 1505550a7375SFelipe Balbi 1506550a7375SFelipe Balbi static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) 1507550a7375SFelipe Balbi { 1508550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1509550a7375SFelipe Balbi 1510550a7375SFelipe Balbi if (!musb->xceiv.set_power) 1511550a7375SFelipe Balbi return -EOPNOTSUPP; 1512550a7375SFelipe Balbi return otg_set_power(&musb->xceiv, mA); 1513550a7375SFelipe Balbi } 1514550a7375SFelipe Balbi 1515550a7375SFelipe Balbi static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1516550a7375SFelipe Balbi { 1517550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1518550a7375SFelipe Balbi unsigned long flags; 1519550a7375SFelipe Balbi 1520550a7375SFelipe Balbi is_on = !!is_on; 1521550a7375SFelipe Balbi 1522550a7375SFelipe Balbi /* NOTE: this assumes we are sensing vbus; we'd rather 1523550a7375SFelipe Balbi * not pullup unless the B-session is active. 1524550a7375SFelipe Balbi */ 1525550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1526550a7375SFelipe Balbi if (is_on != musb->softconnect) { 1527550a7375SFelipe Balbi musb->softconnect = is_on; 1528550a7375SFelipe Balbi musb_pullup(musb, is_on); 1529550a7375SFelipe Balbi } 1530550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1531550a7375SFelipe Balbi return 0; 1532550a7375SFelipe Balbi } 1533550a7375SFelipe Balbi 1534550a7375SFelipe Balbi static const struct usb_gadget_ops musb_gadget_operations = { 1535550a7375SFelipe Balbi .get_frame = musb_gadget_get_frame, 1536550a7375SFelipe Balbi .wakeup = musb_gadget_wakeup, 1537550a7375SFelipe Balbi .set_selfpowered = musb_gadget_set_self_powered, 1538550a7375SFelipe Balbi /* .vbus_session = musb_gadget_vbus_session, */ 1539550a7375SFelipe Balbi .vbus_draw = musb_gadget_vbus_draw, 1540550a7375SFelipe Balbi .pullup = musb_gadget_pullup, 1541550a7375SFelipe Balbi }; 1542550a7375SFelipe Balbi 1543550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1544550a7375SFelipe Balbi 1545550a7375SFelipe Balbi /* Registration */ 1546550a7375SFelipe Balbi 1547550a7375SFelipe Balbi /* Only this registration code "knows" the rule (from USB standards) 1548550a7375SFelipe Balbi * about there being only one external upstream port. It assumes 1549550a7375SFelipe Balbi * all peripheral ports are external... 
1550550a7375SFelipe Balbi */ 1551550a7375SFelipe Balbi static struct musb *the_gadget; 1552550a7375SFelipe Balbi 1553550a7375SFelipe Balbi static void musb_gadget_release(struct device *dev) 1554550a7375SFelipe Balbi { 1555550a7375SFelipe Balbi /* kref_put(WHAT) */ 1556550a7375SFelipe Balbi dev_dbg(dev, "%s\n", __func__); 1557550a7375SFelipe Balbi } 1558550a7375SFelipe Balbi 1559550a7375SFelipe Balbi 1560550a7375SFelipe Balbi static void __init 1561550a7375SFelipe Balbi init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) 1562550a7375SFelipe Balbi { 1563550a7375SFelipe Balbi struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1564550a7375SFelipe Balbi 1565550a7375SFelipe Balbi memset(ep, 0, sizeof *ep); 1566550a7375SFelipe Balbi 1567550a7375SFelipe Balbi ep->current_epnum = epnum; 1568550a7375SFelipe Balbi ep->musb = musb; 1569550a7375SFelipe Balbi ep->hw_ep = hw_ep; 1570550a7375SFelipe Balbi ep->is_in = is_in; 1571550a7375SFelipe Balbi 1572550a7375SFelipe Balbi INIT_LIST_HEAD(&ep->req_list); 1573550a7375SFelipe Balbi 1574550a7375SFelipe Balbi sprintf(ep->name, "ep%d%s", epnum, 1575550a7375SFelipe Balbi (!epnum || hw_ep->is_shared_fifo) ? "" : ( 1576550a7375SFelipe Balbi is_in ? "in" : "out")); 1577550a7375SFelipe Balbi ep->end_point.name = ep->name; 1578550a7375SFelipe Balbi INIT_LIST_HEAD(&ep->end_point.ep_list); 1579550a7375SFelipe Balbi if (!epnum) { 1580550a7375SFelipe Balbi ep->end_point.maxpacket = 64; 1581550a7375SFelipe Balbi ep->end_point.ops = &musb_g_ep0_ops; 1582550a7375SFelipe Balbi musb->g.ep0 = &ep->end_point; 1583550a7375SFelipe Balbi } else { 1584550a7375SFelipe Balbi if (is_in) 1585550a7375SFelipe Balbi ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; 1586550a7375SFelipe Balbi else 1587550a7375SFelipe Balbi ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; 1588550a7375SFelipe Balbi ep->end_point.ops = &musb_ep_ops; 1589550a7375SFelipe Balbi list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); 1590550a7375SFelipe Balbi } 1591550a7375SFelipe Balbi } 1592550a7375SFelipe Balbi 1593550a7375SFelipe Balbi /* 1594550a7375SFelipe Balbi * Initialize the endpoints exposed to peripheral drivers, with backlinks 1595550a7375SFelipe Balbi * to the rest of the driver state. 
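 *
 * Endpoints with separate TX and RX FIFOs are exposed under names like
 * "ep1in" and "ep1out"; a shared-FIFO endpoint gets a single
 * direction-less name such as "ep1".  ep0 keeps its 64 byte maxpacket
 * and the dedicated musb_g_ep0_ops; every other endpoint uses
 * musb_ep_ops and the FIFO size programmed into the hardware.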
1596550a7375SFelipe Balbi */ 1597550a7375SFelipe Balbi static inline void __init musb_g_init_endpoints(struct musb *musb) 1598550a7375SFelipe Balbi { 1599550a7375SFelipe Balbi u8 epnum; 1600550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 1601550a7375SFelipe Balbi unsigned count = 0; 1602550a7375SFelipe Balbi 1603550a7375SFelipe Balbi /* initialize endpoint list just once */ 1604550a7375SFelipe Balbi INIT_LIST_HEAD(&(musb->g.ep_list)); 1605550a7375SFelipe Balbi 1606550a7375SFelipe Balbi for (epnum = 0, hw_ep = musb->endpoints; 1607550a7375SFelipe Balbi epnum < musb->nr_endpoints; 1608550a7375SFelipe Balbi epnum++, hw_ep++) { 1609550a7375SFelipe Balbi if (hw_ep->is_shared_fifo /* || !epnum */) { 1610550a7375SFelipe Balbi init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); 1611550a7375SFelipe Balbi count++; 1612550a7375SFelipe Balbi } else { 1613550a7375SFelipe Balbi if (hw_ep->max_packet_sz_tx) { 1614550a7375SFelipe Balbi init_peripheral_ep(musb, &hw_ep->ep_in, 1615550a7375SFelipe Balbi epnum, 1); 1616550a7375SFelipe Balbi count++; 1617550a7375SFelipe Balbi } 1618550a7375SFelipe Balbi if (hw_ep->max_packet_sz_rx) { 1619550a7375SFelipe Balbi init_peripheral_ep(musb, &hw_ep->ep_out, 1620550a7375SFelipe Balbi epnum, 0); 1621550a7375SFelipe Balbi count++; 1622550a7375SFelipe Balbi } 1623550a7375SFelipe Balbi } 1624550a7375SFelipe Balbi } 1625550a7375SFelipe Balbi } 1626550a7375SFelipe Balbi 1627550a7375SFelipe Balbi /* called once during driver setup to initialize and link into 1628550a7375SFelipe Balbi * the driver model; memory is zeroed. 1629550a7375SFelipe Balbi */ 1630550a7375SFelipe Balbi int __init musb_gadget_setup(struct musb *musb) 1631550a7375SFelipe Balbi { 1632550a7375SFelipe Balbi int status; 1633550a7375SFelipe Balbi 1634550a7375SFelipe Balbi /* REVISIT minor race: if (erroneously) setting up two 1635550a7375SFelipe Balbi * musb peripherals at the same time, only the bus lock 1636550a7375SFelipe Balbi * is probably held. 
1637550a7375SFelipe Balbi */ 1638550a7375SFelipe Balbi if (the_gadget) 1639550a7375SFelipe Balbi return -EBUSY; 1640550a7375SFelipe Balbi the_gadget = musb; 1641550a7375SFelipe Balbi 1642550a7375SFelipe Balbi musb->g.ops = &musb_gadget_operations; 1643550a7375SFelipe Balbi musb->g.is_dualspeed = 1; 1644550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 1645550a7375SFelipe Balbi 1646550a7375SFelipe Balbi /* this "gadget" abstracts/virtualizes the controller */ 1647427c4f33SKay Sievers dev_set_name(&musb->g.dev, "gadget"); 1648550a7375SFelipe Balbi musb->g.dev.parent = musb->controller; 1649550a7375SFelipe Balbi musb->g.dev.dma_mask = musb->controller->dma_mask; 1650550a7375SFelipe Balbi musb->g.dev.release = musb_gadget_release; 1651550a7375SFelipe Balbi musb->g.name = musb_driver_name; 1652550a7375SFelipe Balbi 1653550a7375SFelipe Balbi if (is_otg_enabled(musb)) 1654550a7375SFelipe Balbi musb->g.is_otg = 1; 1655550a7375SFelipe Balbi 1656550a7375SFelipe Balbi musb_g_init_endpoints(musb); 1657550a7375SFelipe Balbi 1658550a7375SFelipe Balbi musb->is_active = 0; 1659550a7375SFelipe Balbi musb_platform_try_idle(musb, 0); 1660550a7375SFelipe Balbi 1661550a7375SFelipe Balbi status = device_register(&musb->g.dev); 1662550a7375SFelipe Balbi if (status != 0) 1663550a7375SFelipe Balbi the_gadget = NULL; 1664550a7375SFelipe Balbi return status; 1665550a7375SFelipe Balbi } 1666550a7375SFelipe Balbi 1667550a7375SFelipe Balbi void musb_gadget_cleanup(struct musb *musb) 1668550a7375SFelipe Balbi { 1669550a7375SFelipe Balbi if (musb != the_gadget) 1670550a7375SFelipe Balbi return; 1671550a7375SFelipe Balbi 1672550a7375SFelipe Balbi device_unregister(&musb->g.dev); 1673550a7375SFelipe Balbi the_gadget = NULL; 1674550a7375SFelipe Balbi } 1675550a7375SFelipe Balbi 1676550a7375SFelipe Balbi /* 1677550a7375SFelipe Balbi * Register the gadget driver. Used by gadget drivers when 1678550a7375SFelipe Balbi * registering themselves with the controller. 
1679550a7375SFelipe Balbi * 1680550a7375SFelipe Balbi * -EINVAL something went wrong (not driver) 1681550a7375SFelipe Balbi * -EBUSY another gadget is already using the controller 1682550a7375SFelipe Balbi * -ENOMEM no memeory to perform the operation 1683550a7375SFelipe Balbi * 1684550a7375SFelipe Balbi * @param driver the gadget driver 1685550a7375SFelipe Balbi * @return <0 if error, 0 if everything is fine 1686550a7375SFelipe Balbi */ 1687550a7375SFelipe Balbi int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1688550a7375SFelipe Balbi { 1689550a7375SFelipe Balbi int retval; 1690550a7375SFelipe Balbi unsigned long flags; 1691550a7375SFelipe Balbi struct musb *musb = the_gadget; 1692550a7375SFelipe Balbi 1693550a7375SFelipe Balbi if (!driver 1694550a7375SFelipe Balbi || driver->speed != USB_SPEED_HIGH 1695550a7375SFelipe Balbi || !driver->bind 1696550a7375SFelipe Balbi || !driver->setup) 1697550a7375SFelipe Balbi return -EINVAL; 1698550a7375SFelipe Balbi 1699550a7375SFelipe Balbi /* driver must be initialized to support peripheral mode */ 1700550a7375SFelipe Balbi if (!musb || !(musb->board_mode == MUSB_OTG 1701550a7375SFelipe Balbi || musb->board_mode != MUSB_OTG)) { 1702550a7375SFelipe Balbi DBG(1, "%s, no dev??\n", __func__); 1703550a7375SFelipe Balbi return -ENODEV; 1704550a7375SFelipe Balbi } 1705550a7375SFelipe Balbi 1706550a7375SFelipe Balbi DBG(3, "registering driver %s\n", driver->function); 1707550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1708550a7375SFelipe Balbi 1709550a7375SFelipe Balbi if (musb->gadget_driver) { 1710550a7375SFelipe Balbi DBG(1, "%s is already bound to %s\n", 1711550a7375SFelipe Balbi musb_driver_name, 1712550a7375SFelipe Balbi musb->gadget_driver->driver.name); 1713550a7375SFelipe Balbi retval = -EBUSY; 1714550a7375SFelipe Balbi } else { 1715550a7375SFelipe Balbi musb->gadget_driver = driver; 1716550a7375SFelipe Balbi musb->g.dev.driver = &driver->driver; 1717550a7375SFelipe Balbi driver->driver.bus = NULL; 1718550a7375SFelipe Balbi musb->softconnect = 1; 1719550a7375SFelipe Balbi retval = 0; 1720550a7375SFelipe Balbi } 1721550a7375SFelipe Balbi 1722550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1723550a7375SFelipe Balbi 1724f362a475SFelipe Balbi if (retval == 0) { 1725550a7375SFelipe Balbi retval = driver->bind(&musb->g); 1726550a7375SFelipe Balbi if (retval != 0) { 1727550a7375SFelipe Balbi DBG(3, "bind to driver %s failed --> %d\n", 1728550a7375SFelipe Balbi driver->driver.name, retval); 1729550a7375SFelipe Balbi musb->gadget_driver = NULL; 1730550a7375SFelipe Balbi musb->g.dev.driver = NULL; 1731550a7375SFelipe Balbi } 1732550a7375SFelipe Balbi 1733550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1734550a7375SFelipe Balbi 1735550a7375SFelipe Balbi /* REVISIT always use otg_set_peripheral(), handling 1736550a7375SFelipe Balbi * issues including the root hub one below ... 1737550a7375SFelipe Balbi */ 1738550a7375SFelipe Balbi musb->xceiv.gadget = &musb->g; 1739550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_B_IDLE; 1740550a7375SFelipe Balbi musb->is_active = 1; 1741550a7375SFelipe Balbi 1742550a7375SFelipe Balbi /* FIXME this ignores the softconnect flag. Drivers are 1743550a7375SFelipe Balbi * allowed hold the peripheral inactive until for example 1744550a7375SFelipe Balbi * userspace hooks up printer hardware or DSP codecs, so 1745550a7375SFelipe Balbi * hosts only see fully functional devices. 
1746550a7375SFelipe Balbi */ 1747550a7375SFelipe Balbi 1748550a7375SFelipe Balbi if (!is_otg_enabled(musb)) 1749550a7375SFelipe Balbi musb_start(musb); 1750550a7375SFelipe Balbi 1751550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1752550a7375SFelipe Balbi 1753550a7375SFelipe Balbi if (is_otg_enabled(musb)) { 1754550a7375SFelipe Balbi DBG(3, "OTG startup...\n"); 1755550a7375SFelipe Balbi 1756550a7375SFelipe Balbi /* REVISIT: funcall to other code, which also 1757550a7375SFelipe Balbi * handles power budgeting ... this way also 1758550a7375SFelipe Balbi * ensures HdrcStart is indirectly called. 1759550a7375SFelipe Balbi */ 1760550a7375SFelipe Balbi retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); 1761550a7375SFelipe Balbi if (retval < 0) { 1762550a7375SFelipe Balbi DBG(1, "add_hcd failed, %d\n", retval); 1763550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1764550a7375SFelipe Balbi musb->xceiv.gadget = NULL; 1765550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_UNDEFINED; 1766550a7375SFelipe Balbi musb->gadget_driver = NULL; 1767550a7375SFelipe Balbi musb->g.dev.driver = NULL; 1768550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1769550a7375SFelipe Balbi } 1770550a7375SFelipe Balbi } 1771550a7375SFelipe Balbi } 1772550a7375SFelipe Balbi 1773550a7375SFelipe Balbi return retval; 1774550a7375SFelipe Balbi } 1775550a7375SFelipe Balbi EXPORT_SYMBOL(usb_gadget_register_driver); 1776550a7375SFelipe Balbi 1777550a7375SFelipe Balbi static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) 1778550a7375SFelipe Balbi { 1779550a7375SFelipe Balbi int i; 1780550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 1781550a7375SFelipe Balbi 1782550a7375SFelipe Balbi /* don't disconnect if it's not connected */ 1783550a7375SFelipe Balbi if (musb->g.speed == USB_SPEED_UNKNOWN) 1784550a7375SFelipe Balbi driver = NULL; 1785550a7375SFelipe Balbi else 1786550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 1787550a7375SFelipe Balbi 1788550a7375SFelipe Balbi /* deactivate the hardware */ 1789550a7375SFelipe Balbi if (musb->softconnect) { 1790550a7375SFelipe Balbi musb->softconnect = 0; 1791550a7375SFelipe Balbi musb_pullup(musb, 0); 1792550a7375SFelipe Balbi } 1793550a7375SFelipe Balbi musb_stop(musb); 1794550a7375SFelipe Balbi 1795550a7375SFelipe Balbi /* killing any outstanding requests will quiesce the driver; 1796550a7375SFelipe Balbi * then report disconnect 1797550a7375SFelipe Balbi */ 1798550a7375SFelipe Balbi if (driver) { 1799550a7375SFelipe Balbi for (i = 0, hw_ep = musb->endpoints; 1800550a7375SFelipe Balbi i < musb->nr_endpoints; 1801550a7375SFelipe Balbi i++, hw_ep++) { 1802550a7375SFelipe Balbi musb_ep_select(musb->mregs, i); 1803550a7375SFelipe Balbi if (hw_ep->is_shared_fifo /* || !epnum */) { 1804550a7375SFelipe Balbi nuke(&hw_ep->ep_in, -ESHUTDOWN); 1805550a7375SFelipe Balbi } else { 1806550a7375SFelipe Balbi if (hw_ep->max_packet_sz_tx) 1807550a7375SFelipe Balbi nuke(&hw_ep->ep_in, -ESHUTDOWN); 1808550a7375SFelipe Balbi if (hw_ep->max_packet_sz_rx) 1809550a7375SFelipe Balbi nuke(&hw_ep->ep_out, -ESHUTDOWN); 1810550a7375SFelipe Balbi } 1811550a7375SFelipe Balbi } 1812550a7375SFelipe Balbi 1813550a7375SFelipe Balbi spin_unlock(&musb->lock); 1814550a7375SFelipe Balbi driver->disconnect(&musb->g); 1815550a7375SFelipe Balbi spin_lock(&musb->lock); 1816550a7375SFelipe Balbi } 1817550a7375SFelipe Balbi } 1818550a7375SFelipe Balbi 1819550a7375SFelipe Balbi /* 1820550a7375SFelipe Balbi * Unregister the gadget driver. 
Used by gadget drivers when 1821550a7375SFelipe Balbi * unregistering themselves from the controller. 1822550a7375SFelipe Balbi * 1823550a7375SFelipe Balbi * @param driver the gadget driver to unregister 1824550a7375SFelipe Balbi */ 1825550a7375SFelipe Balbi int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1826550a7375SFelipe Balbi { 1827550a7375SFelipe Balbi unsigned long flags; 1828550a7375SFelipe Balbi int retval = 0; 1829550a7375SFelipe Balbi struct musb *musb = the_gadget; 1830550a7375SFelipe Balbi 1831550a7375SFelipe Balbi if (!driver || !driver->unbind || !musb) 1832550a7375SFelipe Balbi return -EINVAL; 1833550a7375SFelipe Balbi 1834550a7375SFelipe Balbi /* REVISIT always use otg_set_peripheral() here too; 1835550a7375SFelipe Balbi * this needs to shut down the OTG engine. 1836550a7375SFelipe Balbi */ 1837550a7375SFelipe Balbi 1838550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1839550a7375SFelipe Balbi 1840550a7375SFelipe Balbi #ifdef CONFIG_USB_MUSB_OTG 1841550a7375SFelipe Balbi musb_hnp_stop(musb); 1842550a7375SFelipe Balbi #endif 1843550a7375SFelipe Balbi 1844550a7375SFelipe Balbi if (musb->gadget_driver == driver) { 1845550a7375SFelipe Balbi 1846550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 0); 1847550a7375SFelipe Balbi 1848550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_UNDEFINED; 1849550a7375SFelipe Balbi stop_activity(musb, driver); 1850550a7375SFelipe Balbi 1851550a7375SFelipe Balbi DBG(3, "unregistering driver %s\n", driver->function); 1852550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1853550a7375SFelipe Balbi driver->unbind(&musb->g); 1854550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1855550a7375SFelipe Balbi 1856550a7375SFelipe Balbi musb->gadget_driver = NULL; 1857550a7375SFelipe Balbi musb->g.dev.driver = NULL; 1858550a7375SFelipe Balbi 1859550a7375SFelipe Balbi musb->is_active = 0; 1860550a7375SFelipe Balbi musb_platform_try_idle(musb, 0); 1861550a7375SFelipe Balbi } else 1862550a7375SFelipe Balbi retval = -EINVAL; 1863550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1864550a7375SFelipe Balbi 1865550a7375SFelipe Balbi if (is_otg_enabled(musb) && retval == 0) { 1866550a7375SFelipe Balbi usb_remove_hcd(musb_to_hcd(musb)); 1867550a7375SFelipe Balbi /* FIXME we need to be able to register another 1868550a7375SFelipe Balbi * gadget driver here and have everything work; 1869550a7375SFelipe Balbi * that currently misbehaves. 
1870550a7375SFelipe Balbi */ 1871550a7375SFelipe Balbi } 1872550a7375SFelipe Balbi 1873550a7375SFelipe Balbi return retval; 1874550a7375SFelipe Balbi } 1875550a7375SFelipe Balbi EXPORT_SYMBOL(usb_gadget_unregister_driver); 1876550a7375SFelipe Balbi 1877550a7375SFelipe Balbi 1878550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1879550a7375SFelipe Balbi 1880550a7375SFelipe Balbi /* lifecycle operations called through plat_uds.c */ 1881550a7375SFelipe Balbi 1882550a7375SFelipe Balbi void musb_g_resume(struct musb *musb) 1883550a7375SFelipe Balbi { 1884550a7375SFelipe Balbi musb->is_suspended = 0; 1885550a7375SFelipe Balbi switch (musb->xceiv.state) { 1886550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1887550a7375SFelipe Balbi break; 1888550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 1889550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1890550a7375SFelipe Balbi musb->is_active = 1; 1891550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->resume) { 1892550a7375SFelipe Balbi spin_unlock(&musb->lock); 1893550a7375SFelipe Balbi musb->gadget_driver->resume(&musb->g); 1894550a7375SFelipe Balbi spin_lock(&musb->lock); 1895550a7375SFelipe Balbi } 1896550a7375SFelipe Balbi break; 1897550a7375SFelipe Balbi default: 1898550a7375SFelipe Balbi WARNING("unhandled RESUME transition (%s)\n", 1899550a7375SFelipe Balbi otg_state_string(musb)); 1900550a7375SFelipe Balbi } 1901550a7375SFelipe Balbi } 1902550a7375SFelipe Balbi 1903550a7375SFelipe Balbi /* called when SOF packets stop for 3+ msec */ 1904550a7375SFelipe Balbi void musb_g_suspend(struct musb *musb) 1905550a7375SFelipe Balbi { 1906550a7375SFelipe Balbi u8 devctl; 1907550a7375SFelipe Balbi 1908550a7375SFelipe Balbi devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1909550a7375SFelipe Balbi DBG(3, "devctl %02x\n", devctl); 1910550a7375SFelipe Balbi 1911550a7375SFelipe Balbi switch (musb->xceiv.state) { 1912550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1913550a7375SFelipe Balbi if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 1914550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 1915550a7375SFelipe Balbi break; 1916550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1917550a7375SFelipe Balbi musb->is_suspended = 1; 1918550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->suspend) { 1919550a7375SFelipe Balbi spin_unlock(&musb->lock); 1920550a7375SFelipe Balbi musb->gadget_driver->suspend(&musb->g); 1921550a7375SFelipe Balbi spin_lock(&musb->lock); 1922550a7375SFelipe Balbi } 1923550a7375SFelipe Balbi break; 1924550a7375SFelipe Balbi default: 1925550a7375SFelipe Balbi /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 1926550a7375SFelipe Balbi * A_PERIPHERAL may need care too 1927550a7375SFelipe Balbi */ 1928550a7375SFelipe Balbi WARNING("unhandled SUSPEND transition (%s)\n", 1929550a7375SFelipe Balbi otg_state_string(musb)); 1930550a7375SFelipe Balbi } 1931550a7375SFelipe Balbi } 1932550a7375SFelipe Balbi 1933550a7375SFelipe Balbi /* Called during SRP */ 1934550a7375SFelipe Balbi void musb_g_wakeup(struct musb *musb) 1935550a7375SFelipe Balbi { 1936550a7375SFelipe Balbi musb_gadget_wakeup(&musb->g); 1937550a7375SFelipe Balbi } 1938550a7375SFelipe Balbi 1939550a7375SFelipe Balbi /* called when VBUS drops below session threshold, and in other cases */ 1940550a7375SFelipe Balbi void musb_g_disconnect(struct musb *musb) 1941550a7375SFelipe Balbi { 1942550a7375SFelipe Balbi void __iomem *mregs = musb->mregs; 1943550a7375SFelipe Balbi u8 devctl = musb_readb(mregs, 
MUSB_DEVCTL); 1944550a7375SFelipe Balbi 1945550a7375SFelipe Balbi DBG(3, "devctl %02x\n", devctl); 1946550a7375SFelipe Balbi 1947550a7375SFelipe Balbi /* clear HR */ 1948550a7375SFelipe Balbi musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 1949550a7375SFelipe Balbi 1950550a7375SFelipe Balbi /* don't draw vbus until new b-default session */ 1951550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 0); 1952550a7375SFelipe Balbi 1953550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 1954550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->disconnect) { 1955550a7375SFelipe Balbi spin_unlock(&musb->lock); 1956550a7375SFelipe Balbi musb->gadget_driver->disconnect(&musb->g); 1957550a7375SFelipe Balbi spin_lock(&musb->lock); 1958550a7375SFelipe Balbi } 1959550a7375SFelipe Balbi 1960550a7375SFelipe Balbi switch (musb->xceiv.state) { 1961550a7375SFelipe Balbi default: 1962550a7375SFelipe Balbi #ifdef CONFIG_USB_MUSB_OTG 1963550a7375SFelipe Balbi DBG(2, "Unhandled disconnect %s, setting a_idle\n", 1964550a7375SFelipe Balbi otg_state_string(musb)); 1965550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_A_IDLE; 1966550a7375SFelipe Balbi break; 1967550a7375SFelipe Balbi case OTG_STATE_A_PERIPHERAL: 1968550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; 1969550a7375SFelipe Balbi break; 1970550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 1971550a7375SFelipe Balbi case OTG_STATE_B_HOST: 1972550a7375SFelipe Balbi #endif 1973550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1974550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1975550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_B_IDLE; 1976550a7375SFelipe Balbi break; 1977550a7375SFelipe Balbi case OTG_STATE_B_SRP_INIT: 1978550a7375SFelipe Balbi break; 1979550a7375SFelipe Balbi } 1980550a7375SFelipe Balbi 1981550a7375SFelipe Balbi musb->is_active = 0; 1982550a7375SFelipe Balbi } 1983550a7375SFelipe Balbi 1984550a7375SFelipe Balbi void musb_g_reset(struct musb *musb) 1985550a7375SFelipe Balbi __releases(musb->lock) 1986550a7375SFelipe Balbi __acquires(musb->lock) 1987550a7375SFelipe Balbi { 1988550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 1989550a7375SFelipe Balbi u8 devctl = musb_readb(mbase, MUSB_DEVCTL); 1990550a7375SFelipe Balbi u8 power; 1991550a7375SFelipe Balbi 1992550a7375SFelipe Balbi DBG(3, "<== %s addr=%x driver '%s'\n", 1993550a7375SFelipe Balbi (devctl & MUSB_DEVCTL_BDEVICE) 1994550a7375SFelipe Balbi ? "B-Device" : "A-Device", 1995550a7375SFelipe Balbi musb_readb(mbase, MUSB_FADDR), 1996550a7375SFelipe Balbi musb->gadget_driver 1997550a7375SFelipe Balbi ? musb->gadget_driver->driver.name 1998550a7375SFelipe Balbi : NULL 1999550a7375SFelipe Balbi ); 2000550a7375SFelipe Balbi 2001550a7375SFelipe Balbi /* report disconnect, if we didn't already (flushing EP state) */ 2002550a7375SFelipe Balbi if (musb->g.speed != USB_SPEED_UNKNOWN) 2003550a7375SFelipe Balbi musb_g_disconnect(musb); 2004550a7375SFelipe Balbi 2005550a7375SFelipe Balbi /* clear HR */ 2006550a7375SFelipe Balbi else if (devctl & MUSB_DEVCTL_HR) 2007550a7375SFelipe Balbi musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 2008550a7375SFelipe Balbi 2009550a7375SFelipe Balbi 2010550a7375SFelipe Balbi /* what speed did we negotiate? */ 2011550a7375SFelipe Balbi power = musb_readb(mbase, MUSB_POWER); 2012550a7375SFelipe Balbi musb->g.speed = (power & MUSB_POWER_HSMODE) 2013550a7375SFelipe Balbi ? 
USB_SPEED_HIGH : USB_SPEED_FULL; 2014550a7375SFelipe Balbi 2015550a7375SFelipe Balbi /* start in USB_STATE_DEFAULT */ 2016550a7375SFelipe Balbi musb->is_active = 1; 2017550a7375SFelipe Balbi musb->is_suspended = 0; 2018550a7375SFelipe Balbi MUSB_DEV_MODE(musb); 2019550a7375SFelipe Balbi musb->address = 0; 2020550a7375SFelipe Balbi musb->ep0_state = MUSB_EP0_STAGE_SETUP; 2021550a7375SFelipe Balbi 2022550a7375SFelipe Balbi musb->may_wakeup = 0; 2023550a7375SFelipe Balbi musb->g.b_hnp_enable = 0; 2024550a7375SFelipe Balbi musb->g.a_alt_hnp_support = 0; 2025550a7375SFelipe Balbi musb->g.a_hnp_support = 0; 2026550a7375SFelipe Balbi 2027550a7375SFelipe Balbi /* Normal reset, as B-Device; 2028550a7375SFelipe Balbi * or else after HNP, as A-Device 2029550a7375SFelipe Balbi */ 2030550a7375SFelipe Balbi if (devctl & MUSB_DEVCTL_BDEVICE) { 2031550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 2032550a7375SFelipe Balbi musb->g.is_a_peripheral = 0; 2033550a7375SFelipe Balbi } else if (is_otg_enabled(musb)) { 2034550a7375SFelipe Balbi musb->xceiv.state = OTG_STATE_A_PERIPHERAL; 2035550a7375SFelipe Balbi musb->g.is_a_peripheral = 1; 2036550a7375SFelipe Balbi } else 2037550a7375SFelipe Balbi WARN_ON(1); 2038550a7375SFelipe Balbi 2039550a7375SFelipe Balbi /* start with default limits on VBUS power draw */ 2040550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 2041550a7375SFelipe Balbi is_otg_enabled(musb) ? 8 : 100); 2042550a7375SFelipe Balbi } 2043
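/*
 * Illustrative sketch only, not part of this driver: roughly how a
 * gadget function driver feeds the enable/queue/complete paths above.
 * "my_ep", "my_desc", "my_buf", "my_complete" and "my_submit" are
 * hypothetical names that do not exist in this file; error handling is
 * reduced to the minimum.
 */
#if 0
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* runs from musb_g_giveback(), with the controller lock dropped */
	if (req->status == 0)
		DBG(4, "transferred %u bytes\n", req->actual);
	usb_ep_free_request(ep, req);	/* backed by musb_free_request() */
}

static int my_submit(struct usb_ep *my_ep,
		const struct usb_endpoint_descriptor *my_desc,
		void *my_buf, unsigned len)
{
	struct usb_request	*req;
	int			status;

	/* program FIFO size, CSR bits etc; see musb_gadget_enable() */
	status = usb_ep_enable(my_ep, my_desc);
	if (status < 0)
		return status;

	/* backed by musb_alloc_request() */
	req = usb_ep_alloc_request(my_ep, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->buf = my_buf;
	req->length = len;
	req->complete = my_complete;

	/* lands in musb_gadget_queue(); may start PIO/DMA immediately */
	status = usb_ep_queue(my_ep, req, GFP_KERNEL);
	if (status < 0)
		usb_ep_free_request(my_ep, req);
	return status;
}
#endif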