1550a7375SFelipe Balbi /* 2550a7375SFelipe Balbi * MUSB OTG driver peripheral support 3550a7375SFelipe Balbi * 4550a7375SFelipe Balbi * Copyright 2005 Mentor Graphics Corporation 5550a7375SFelipe Balbi * Copyright (C) 2005-2006 by Texas Instruments 6550a7375SFelipe Balbi * Copyright (C) 2006-2007 Nokia Corporation 7cea83241SSergei Shtylyov * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> 8550a7375SFelipe Balbi * 9550a7375SFelipe Balbi * This program is free software; you can redistribute it and/or 10550a7375SFelipe Balbi * modify it under the terms of the GNU General Public License 11550a7375SFelipe Balbi * version 2 as published by the Free Software Foundation. 12550a7375SFelipe Balbi * 13550a7375SFelipe Balbi * This program is distributed in the hope that it will be useful, but 14550a7375SFelipe Balbi * WITHOUT ANY WARRANTY; without even the implied warranty of 15550a7375SFelipe Balbi * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16550a7375SFelipe Balbi * General Public License for more details. 17550a7375SFelipe Balbi * 18550a7375SFelipe Balbi * You should have received a copy of the GNU General Public License 19550a7375SFelipe Balbi * along with this program; if not, write to the Free Software 20550a7375SFelipe Balbi * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 21550a7375SFelipe Balbi * 02110-1301 USA 22550a7375SFelipe Balbi * 23550a7375SFelipe Balbi * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 24550a7375SFelipe Balbi * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 25550a7375SFelipe Balbi * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN 26550a7375SFelipe Balbi * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27550a7375SFelipe Balbi * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28550a7375SFelipe Balbi * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 29550a7375SFelipe Balbi * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 30550a7375SFelipe Balbi * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31550a7375SFelipe Balbi * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32550a7375SFelipe Balbi * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33550a7375SFelipe Balbi * 34550a7375SFelipe Balbi */ 35550a7375SFelipe Balbi 36550a7375SFelipe Balbi #include <linux/kernel.h> 37550a7375SFelipe Balbi #include <linux/list.h> 38550a7375SFelipe Balbi #include <linux/timer.h> 39550a7375SFelipe Balbi #include <linux/module.h> 40550a7375SFelipe Balbi #include <linux/smp.h> 41550a7375SFelipe Balbi #include <linux/spinlock.h> 42550a7375SFelipe Balbi #include <linux/delay.h> 43550a7375SFelipe Balbi #include <linux/dma-mapping.h> 445a0e3ad6STejun Heo #include <linux/slab.h> 45550a7375SFelipe Balbi 46550a7375SFelipe Balbi #include "musb_core.h" 47550a7375SFelipe Balbi 48550a7375SFelipe Balbi 49550a7375SFelipe Balbi /* MUSB PERIPHERAL status 3-mar-2006: 50550a7375SFelipe Balbi * 51550a7375SFelipe Balbi * - EP0 seems solid. It passes both USBCV and usbtest control cases. 52550a7375SFelipe Balbi * Minor glitches: 53550a7375SFelipe Balbi * 54550a7375SFelipe Balbi * + remote wakeup to Linux hosts work, but saw USBCV failures; 55550a7375SFelipe Balbi * in one test run (operator error?) 56550a7375SFelipe Balbi * + endpoint halt tests -- in both usbtest and usbcv -- seem 57550a7375SFelipe Balbi * to break when dma is enabled ... is something wrongly 58550a7375SFelipe Balbi * clearing SENDSTALL? 
59550a7375SFelipe Balbi * 60550a7375SFelipe Balbi * - Mass storage behaved ok when last tested. Network traffic patterns 61550a7375SFelipe Balbi * (with lots of short transfers etc) need retesting; they turn up the 62550a7375SFelipe Balbi * worst cases of the DMA, since short packets are typical but are not 63550a7375SFelipe Balbi * required. 64550a7375SFelipe Balbi * 65550a7375SFelipe Balbi * - TX/IN 66550a7375SFelipe Balbi * + both pio and dma behave in with network and g_zero tests 67550a7375SFelipe Balbi * + no cppi throughput issues other than no-hw-queueing 68550a7375SFelipe Balbi * + failed with FLAT_REG (DaVinci) 69550a7375SFelipe Balbi * + seems to behave with double buffering, PIO -and- CPPI 70550a7375SFelipe Balbi * + with gadgetfs + AIO, requests got lost? 71550a7375SFelipe Balbi * 72550a7375SFelipe Balbi * - RX/OUT 73550a7375SFelipe Balbi * + both pio and dma behave in with network and g_zero tests 74550a7375SFelipe Balbi * + dma is slow in typical case (short_not_ok is clear) 75550a7375SFelipe Balbi * + double buffering ok with PIO 76550a7375SFelipe Balbi * + double buffering *FAILS* with CPPI, wrong data bytes sometimes 77550a7375SFelipe Balbi * + request lossage observed with gadgetfs 78550a7375SFelipe Balbi * 79550a7375SFelipe Balbi * - ISO not tested ... might work, but only weakly isochronous 80550a7375SFelipe Balbi * 81550a7375SFelipe Balbi * - Gadget driver disabling of softconnect during bind() is ignored; so 82550a7375SFelipe Balbi * drivers can't hold off host requests until userspace is ready. 83550a7375SFelipe Balbi * (Workaround: they can turn it off later.) 
84550a7375SFelipe Balbi * 85550a7375SFelipe Balbi * - PORTABILITY (assumes PIO works): 86550a7375SFelipe Balbi * + DaVinci, basically works with cppi dma 87550a7375SFelipe Balbi * + OMAP 2430, ditto with mentor dma 88550a7375SFelipe Balbi * + TUSB 6010, platform-specific dma in the works 89550a7375SFelipe Balbi */ 90550a7375SFelipe Balbi 91550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 92550a7375SFelipe Balbi 93c65bfa62SMian Yousaf Kaukab #define is_buffer_mapped(req) (is_dma_capable() && \ 94c65bfa62SMian Yousaf Kaukab (req->map_state != UN_MAPPED)) 95c65bfa62SMian Yousaf Kaukab 9692d2711fSHema Kalliguddi /* Maps the buffer to dma */ 9792d2711fSHema Kalliguddi 9892d2711fSHema Kalliguddi static inline void map_dma_buffer(struct musb_request *request, 99c65bfa62SMian Yousaf Kaukab struct musb *musb, struct musb_ep *musb_ep) 10092d2711fSHema Kalliguddi { 1015f5761cbSMian Yousaf Kaukab int compatible = true; 1025f5761cbSMian Yousaf Kaukab struct dma_controller *dma = musb->dma_controller; 1035f5761cbSMian Yousaf Kaukab 104c65bfa62SMian Yousaf Kaukab request->map_state = UN_MAPPED; 105c65bfa62SMian Yousaf Kaukab 106c65bfa62SMian Yousaf Kaukab if (!is_dma_capable() || !musb_ep->dma) 107c65bfa62SMian Yousaf Kaukab return; 108c65bfa62SMian Yousaf Kaukab 1095f5761cbSMian Yousaf Kaukab /* Check if DMA engine can handle this request. 1105f5761cbSMian Yousaf Kaukab * DMA code must reject the USB request explicitly. 1115f5761cbSMian Yousaf Kaukab * Default behaviour is to map the request. 
1125f5761cbSMian Yousaf Kaukab */ 1135f5761cbSMian Yousaf Kaukab if (dma->is_compatible) 1145f5761cbSMian Yousaf Kaukab compatible = dma->is_compatible(musb_ep->dma, 1155f5761cbSMian Yousaf Kaukab musb_ep->packet_sz, request->request.buf, 1165f5761cbSMian Yousaf Kaukab request->request.length); 1175f5761cbSMian Yousaf Kaukab if (!compatible) 1185f5761cbSMian Yousaf Kaukab return; 1195f5761cbSMian Yousaf Kaukab 12092d2711fSHema Kalliguddi if (request->request.dma == DMA_ADDR_INVALID) { 12192d2711fSHema Kalliguddi request->request.dma = dma_map_single( 12292d2711fSHema Kalliguddi musb->controller, 12392d2711fSHema Kalliguddi request->request.buf, 12492d2711fSHema Kalliguddi request->request.length, 12592d2711fSHema Kalliguddi request->tx 12692d2711fSHema Kalliguddi ? DMA_TO_DEVICE 12792d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 128c65bfa62SMian Yousaf Kaukab request->map_state = MUSB_MAPPED; 12992d2711fSHema Kalliguddi } else { 13092d2711fSHema Kalliguddi dma_sync_single_for_device(musb->controller, 13192d2711fSHema Kalliguddi request->request.dma, 13292d2711fSHema Kalliguddi request->request.length, 13392d2711fSHema Kalliguddi request->tx 13492d2711fSHema Kalliguddi ? 
DMA_TO_DEVICE 13592d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 136c65bfa62SMian Yousaf Kaukab request->map_state = PRE_MAPPED; 13792d2711fSHema Kalliguddi } 13892d2711fSHema Kalliguddi } 13992d2711fSHema Kalliguddi 14092d2711fSHema Kalliguddi /* Unmap the buffer from dma and maps it back to cpu */ 14192d2711fSHema Kalliguddi static inline void unmap_dma_buffer(struct musb_request *request, 14292d2711fSHema Kalliguddi struct musb *musb) 14392d2711fSHema Kalliguddi { 144*06d9db72SKishon Vijay Abraham I struct musb_ep *musb_ep = request->ep; 145*06d9db72SKishon Vijay Abraham I 146*06d9db72SKishon Vijay Abraham I if (!is_buffer_mapped(request) || !musb_ep->dma) 147c65bfa62SMian Yousaf Kaukab return; 148c65bfa62SMian Yousaf Kaukab 14992d2711fSHema Kalliguddi if (request->request.dma == DMA_ADDR_INVALID) { 1505c8a86e1SFelipe Balbi dev_vdbg(musb->controller, 1515c8a86e1SFelipe Balbi "not unmapping a never mapped buffer\n"); 15292d2711fSHema Kalliguddi return; 15392d2711fSHema Kalliguddi } 154c65bfa62SMian Yousaf Kaukab if (request->map_state == MUSB_MAPPED) { 15592d2711fSHema Kalliguddi dma_unmap_single(musb->controller, 15692d2711fSHema Kalliguddi request->request.dma, 15792d2711fSHema Kalliguddi request->request.length, 15892d2711fSHema Kalliguddi request->tx 15992d2711fSHema Kalliguddi ? DMA_TO_DEVICE 16092d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 16192d2711fSHema Kalliguddi request->request.dma = DMA_ADDR_INVALID; 162c65bfa62SMian Yousaf Kaukab } else { /* PRE_MAPPED */ 16392d2711fSHema Kalliguddi dma_sync_single_for_cpu(musb->controller, 16492d2711fSHema Kalliguddi request->request.dma, 16592d2711fSHema Kalliguddi request->request.length, 16692d2711fSHema Kalliguddi request->tx 16792d2711fSHema Kalliguddi ? 
DMA_TO_DEVICE 16892d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 16992d2711fSHema Kalliguddi } 170c65bfa62SMian Yousaf Kaukab request->map_state = UN_MAPPED; 17192d2711fSHema Kalliguddi } 17292d2711fSHema Kalliguddi 173550a7375SFelipe Balbi /* 174550a7375SFelipe Balbi * Immediately complete a request. 175550a7375SFelipe Balbi * 176550a7375SFelipe Balbi * @param request the request to complete 177550a7375SFelipe Balbi * @param status the status to complete the request with 178550a7375SFelipe Balbi * Context: controller locked, IRQs blocked. 179550a7375SFelipe Balbi */ 180550a7375SFelipe Balbi void musb_g_giveback( 181550a7375SFelipe Balbi struct musb_ep *ep, 182550a7375SFelipe Balbi struct usb_request *request, 183550a7375SFelipe Balbi int status) 184550a7375SFelipe Balbi __releases(ep->musb->lock) 185550a7375SFelipe Balbi __acquires(ep->musb->lock) 186550a7375SFelipe Balbi { 187550a7375SFelipe Balbi struct musb_request *req; 188550a7375SFelipe Balbi struct musb *musb; 189550a7375SFelipe Balbi int busy = ep->busy; 190550a7375SFelipe Balbi 191550a7375SFelipe Balbi req = to_musb_request(request); 192550a7375SFelipe Balbi 193ad1adb89SFelipe Balbi list_del(&req->list); 194550a7375SFelipe Balbi if (req->request.status == -EINPROGRESS) 195550a7375SFelipe Balbi req->request.status = status; 196550a7375SFelipe Balbi musb = req->musb; 197550a7375SFelipe Balbi 198550a7375SFelipe Balbi ep->busy = 1; 199550a7375SFelipe Balbi spin_unlock(&musb->lock); 200*06d9db72SKishon Vijay Abraham I 201*06d9db72SKishon Vijay Abraham I if (!dma_mapping_error(&musb->g.dev, request->dma)) 20292d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 203*06d9db72SKishon Vijay Abraham I 204550a7375SFelipe Balbi if (request->status == 0) 2055c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s done request %p, %d/%d\n", 206550a7375SFelipe Balbi ep->end_point.name, request, 207550a7375SFelipe Balbi req->request.actual, req->request.length); 208550a7375SFelipe Balbi else 2095c8a86e1SFelipe Balbi 
dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", 210550a7375SFelipe Balbi ep->end_point.name, request, 211550a7375SFelipe Balbi req->request.actual, req->request.length, 212550a7375SFelipe Balbi request->status); 213550a7375SFelipe Balbi req->request.complete(&req->ep->end_point, &req->request); 214550a7375SFelipe Balbi spin_lock(&musb->lock); 215550a7375SFelipe Balbi ep->busy = busy; 216550a7375SFelipe Balbi } 217550a7375SFelipe Balbi 218550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 219550a7375SFelipe Balbi 220550a7375SFelipe Balbi /* 221550a7375SFelipe Balbi * Abort requests queued to an endpoint using the status. Synchronous. 222550a7375SFelipe Balbi * caller locked controller and blocked irqs, and selected this ep. 223550a7375SFelipe Balbi */ 224550a7375SFelipe Balbi static void nuke(struct musb_ep *ep, const int status) 225550a7375SFelipe Balbi { 2265c8a86e1SFelipe Balbi struct musb *musb = ep->musb; 227550a7375SFelipe Balbi struct musb_request *req = NULL; 228550a7375SFelipe Balbi void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; 229550a7375SFelipe Balbi 230550a7375SFelipe Balbi ep->busy = 1; 231550a7375SFelipe Balbi 232550a7375SFelipe Balbi if (is_dma_capable() && ep->dma) { 233550a7375SFelipe Balbi struct dma_controller *c = ep->musb->dma_controller; 234550a7375SFelipe Balbi int value; 235b6e434a5SSergei Shtylyov 236550a7375SFelipe Balbi if (ep->is_in) { 237b6e434a5SSergei Shtylyov /* 238b6e434a5SSergei Shtylyov * The programming guide says that we must not clear 239b6e434a5SSergei Shtylyov * the DMAMODE bit before DMAENAB, so we only 240b6e434a5SSergei Shtylyov * clear it in the second write... 
241b6e434a5SSergei Shtylyov */ 242550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, 243b6e434a5SSergei Shtylyov MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); 244550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, 245550a7375SFelipe Balbi 0 | MUSB_TXCSR_FLUSHFIFO); 246550a7375SFelipe Balbi } else { 247550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 248550a7375SFelipe Balbi 0 | MUSB_RXCSR_FLUSHFIFO); 249550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 250550a7375SFelipe Balbi 0 | MUSB_RXCSR_FLUSHFIFO); 251550a7375SFelipe Balbi } 252550a7375SFelipe Balbi 253550a7375SFelipe Balbi value = c->channel_abort(ep->dma); 2545c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s: abort DMA --> %d\n", 2555c8a86e1SFelipe Balbi ep->name, value); 256550a7375SFelipe Balbi c->channel_release(ep->dma); 257550a7375SFelipe Balbi ep->dma = NULL; 258550a7375SFelipe Balbi } 259550a7375SFelipe Balbi 260ad1adb89SFelipe Balbi while (!list_empty(&ep->req_list)) { 261ad1adb89SFelipe Balbi req = list_first_entry(&ep->req_list, struct musb_request, list); 262550a7375SFelipe Balbi musb_g_giveback(ep, &req->request, status); 263550a7375SFelipe Balbi } 264550a7375SFelipe Balbi } 265550a7375SFelipe Balbi 266550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 267550a7375SFelipe Balbi 268550a7375SFelipe Balbi /* Data transfers - pure PIO, pure DMA, or mixed mode */ 269550a7375SFelipe Balbi 270550a7375SFelipe Balbi /* 271550a7375SFelipe Balbi * This assumes the separate CPPI engine is responding to DMA requests 272550a7375SFelipe Balbi * from the usb core ... sequenced a bit differently from mentor dma. 
273550a7375SFelipe Balbi */ 274550a7375SFelipe Balbi 275550a7375SFelipe Balbi static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) 276550a7375SFelipe Balbi { 277550a7375SFelipe Balbi if (can_bulk_split(musb, ep->type)) 278550a7375SFelipe Balbi return ep->hw_ep->max_packet_sz_tx; 279550a7375SFelipe Balbi else 280550a7375SFelipe Balbi return ep->packet_sz; 281550a7375SFelipe Balbi } 282550a7375SFelipe Balbi 283550a7375SFelipe Balbi 284550a7375SFelipe Balbi #ifdef CONFIG_USB_INVENTRA_DMA 285550a7375SFelipe Balbi 286550a7375SFelipe Balbi /* Peripheral tx (IN) using Mentor DMA works as follows: 287550a7375SFelipe Balbi Only mode 0 is used for transfers <= wPktSize, 288550a7375SFelipe Balbi mode 1 is used for larger transfers, 289550a7375SFelipe Balbi 290550a7375SFelipe Balbi One of the following happens: 291550a7375SFelipe Balbi - Host sends IN token which causes an endpoint interrupt 292550a7375SFelipe Balbi -> TxAvail 293550a7375SFelipe Balbi -> if DMA is currently busy, exit. 294550a7375SFelipe Balbi -> if queue is non-empty, txstate(). 295550a7375SFelipe Balbi 296550a7375SFelipe Balbi - Request is queued by the gadget driver. 297550a7375SFelipe Balbi -> if queue was previously empty, txstate() 298550a7375SFelipe Balbi 299550a7375SFelipe Balbi txstate() 300550a7375SFelipe Balbi -> start 301550a7375SFelipe Balbi /\ -> setup DMA 302550a7375SFelipe Balbi | (data is transferred to the FIFO, then sent out when 303550a7375SFelipe Balbi | IN token(s) are recd from Host. 304550a7375SFelipe Balbi | -> DMA interrupt on completion 305550a7375SFelipe Balbi | calls TxAvail. 
306b6e434a5SSergei Shtylyov | -> stop DMA, ~DMAENAB, 307550a7375SFelipe Balbi | -> set TxPktRdy for last short pkt or zlp 308550a7375SFelipe Balbi | -> Complete Request 309550a7375SFelipe Balbi | -> Continue next request (call txstate) 310550a7375SFelipe Balbi |___________________________________| 311550a7375SFelipe Balbi 312550a7375SFelipe Balbi * Non-Mentor DMA engines can of course work differently, such as by 313550a7375SFelipe Balbi * upleveling from irq-per-packet to irq-per-buffer. 314550a7375SFelipe Balbi */ 315550a7375SFelipe Balbi 316550a7375SFelipe Balbi #endif 317550a7375SFelipe Balbi 318550a7375SFelipe Balbi /* 319550a7375SFelipe Balbi * An endpoint is transmitting data. This can be called either from 320550a7375SFelipe Balbi * the IRQ routine or from ep.queue() to kickstart a request on an 321550a7375SFelipe Balbi * endpoint. 322550a7375SFelipe Balbi * 323550a7375SFelipe Balbi * Context: controller locked, IRQs blocked, endpoint selected 324550a7375SFelipe Balbi */ 325550a7375SFelipe Balbi static void txstate(struct musb *musb, struct musb_request *req) 326550a7375SFelipe Balbi { 327550a7375SFelipe Balbi u8 epnum = req->epnum; 328550a7375SFelipe Balbi struct musb_ep *musb_ep; 329550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 330550a7375SFelipe Balbi struct usb_request *request; 331550a7375SFelipe Balbi u16 fifo_count = 0, csr; 332550a7375SFelipe Balbi int use_dma = 0; 333550a7375SFelipe Balbi 334550a7375SFelipe Balbi musb_ep = req->ep; 335550a7375SFelipe Balbi 336abf710e6SVikram Pandita /* Check if EP is disabled */ 337abf710e6SVikram Pandita if (!musb_ep->desc) { 338abf710e6SVikram Pandita dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", 339abf710e6SVikram Pandita musb_ep->end_point.name); 340abf710e6SVikram Pandita return; 341abf710e6SVikram Pandita } 342abf710e6SVikram Pandita 343550a7375SFelipe Balbi /* we shouldn't get here while DMA is active ... but we do ... 
*/ 344550a7375SFelipe Balbi if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 3455c8a86e1SFelipe Balbi dev_dbg(musb->controller, "dma pending...\n"); 346550a7375SFelipe Balbi return; 347550a7375SFelipe Balbi } 348550a7375SFelipe Balbi 349550a7375SFelipe Balbi /* read TXCSR before */ 350550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 351550a7375SFelipe Balbi 352550a7375SFelipe Balbi request = &req->request; 353550a7375SFelipe Balbi fifo_count = min(max_ep_writesize(musb, musb_ep), 354550a7375SFelipe Balbi (int)(request->length - request->actual)); 355550a7375SFelipe Balbi 356550a7375SFelipe Balbi if (csr & MUSB_TXCSR_TXPKTRDY) { 3575c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", 358550a7375SFelipe Balbi musb_ep->end_point.name, csr); 359550a7375SFelipe Balbi return; 360550a7375SFelipe Balbi } 361550a7375SFelipe Balbi 362550a7375SFelipe Balbi if (csr & MUSB_TXCSR_P_SENDSTALL) { 3635c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", 364550a7375SFelipe Balbi musb_ep->end_point.name, csr); 365550a7375SFelipe Balbi return; 366550a7375SFelipe Balbi } 367550a7375SFelipe Balbi 3685c8a86e1SFelipe Balbi dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", 369550a7375SFelipe Balbi epnum, musb_ep->packet_sz, fifo_count, 370550a7375SFelipe Balbi csr); 371550a7375SFelipe Balbi 372550a7375SFelipe Balbi #ifndef CONFIG_MUSB_PIO_ONLY 373c65bfa62SMian Yousaf Kaukab if (is_buffer_mapped(req)) { 374550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 37566af83ddSMing Lei size_t request_size; 37666af83ddSMing Lei 37766af83ddSMing Lei /* setup DMA, then program endpoint CSR */ 37866af83ddSMing Lei request_size = min_t(size_t, request->length - request->actual, 37966af83ddSMing Lei musb_ep->dma->max_len); 380550a7375SFelipe Balbi 381d17d535fSAjay Kumar Gupta use_dma = (request->dma != DMA_ADDR_INVALID && request_size); 382550a7375SFelipe Balbi 
383550a7375SFelipe Balbi /* MUSB_TXCSR_P_ISO is still set correctly */ 384550a7375SFelipe Balbi 385a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) 386550a7375SFelipe Balbi { 387d1043a26SAnand Gadiyar if (request_size < musb_ep->packet_sz) 388550a7375SFelipe Balbi musb_ep->dma->desired_mode = 0; 389550a7375SFelipe Balbi else 390550a7375SFelipe Balbi musb_ep->dma->desired_mode = 1; 391550a7375SFelipe Balbi 392550a7375SFelipe Balbi use_dma = use_dma && c->channel_program( 393550a7375SFelipe Balbi musb_ep->dma, musb_ep->packet_sz, 394550a7375SFelipe Balbi musb_ep->dma->desired_mode, 395796a83faSCliff Cai request->dma + request->actual, request_size); 396550a7375SFelipe Balbi if (use_dma) { 397550a7375SFelipe Balbi if (musb_ep->dma->desired_mode == 0) { 398b6e434a5SSergei Shtylyov /* 399b6e434a5SSergei Shtylyov * We must not clear the DMAMODE bit 400b6e434a5SSergei Shtylyov * before the DMAENAB bit -- and the 401b6e434a5SSergei Shtylyov * latter doesn't always get cleared 402b6e434a5SSergei Shtylyov * before we get here... 
403b6e434a5SSergei Shtylyov */ 404b6e434a5SSergei Shtylyov csr &= ~(MUSB_TXCSR_AUTOSET 405b6e434a5SSergei Shtylyov | MUSB_TXCSR_DMAENAB); 406b6e434a5SSergei Shtylyov musb_writew(epio, MUSB_TXCSR, csr 407b6e434a5SSergei Shtylyov | MUSB_TXCSR_P_WZC_BITS); 408b6e434a5SSergei Shtylyov csr &= ~MUSB_TXCSR_DMAMODE; 409550a7375SFelipe Balbi csr |= (MUSB_TXCSR_DMAENAB | 410550a7375SFelipe Balbi MUSB_TXCSR_MODE); 411550a7375SFelipe Balbi /* against programming guide */ 412f11d893dSMing Lei } else { 413f11d893dSMing Lei csr |= (MUSB_TXCSR_DMAENAB 414550a7375SFelipe Balbi | MUSB_TXCSR_DMAMODE 415550a7375SFelipe Balbi | MUSB_TXCSR_MODE); 416bb3a2ef2Ssupriya karanth /* 417bb3a2ef2Ssupriya karanth * Enable Autoset according to table 418bb3a2ef2Ssupriya karanth * below 419bb3a2ef2Ssupriya karanth * bulk_split hb_mult Autoset_Enable 420bb3a2ef2Ssupriya karanth * 0 0 Yes(Normal) 421bb3a2ef2Ssupriya karanth * 0 >0 No(High BW ISO) 422bb3a2ef2Ssupriya karanth * 1 0 Yes(HS bulk) 423bb3a2ef2Ssupriya karanth * 1 >0 Yes(FS bulk) 424bb3a2ef2Ssupriya karanth */ 425bb3a2ef2Ssupriya karanth if (!musb_ep->hb_mult || 426bb3a2ef2Ssupriya karanth (musb_ep->hb_mult && 427bb3a2ef2Ssupriya karanth can_bulk_split(musb, 428bb3a2ef2Ssupriya karanth musb_ep->type))) 429f11d893dSMing Lei csr |= MUSB_TXCSR_AUTOSET; 430f11d893dSMing Lei } 431550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_P_UNDERRUN; 432f11d893dSMing Lei 433550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 434550a7375SFelipe Balbi } 435550a7375SFelipe Balbi } 436550a7375SFelipe Balbi 437550a7375SFelipe Balbi #elif defined(CONFIG_USB_TI_CPPI_DMA) 438550a7375SFelipe Balbi /* program endpoint CSR first, then setup DMA */ 439b6e434a5SSergei Shtylyov csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 44037e3ee99SSergei Shtylyov csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | 44137e3ee99SSergei Shtylyov MUSB_TXCSR_MODE; 442550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, 443550a7375SFelipe Balbi (MUSB_TXCSR_P_WZC_BITS & 
~MUSB_TXCSR_P_UNDERRUN) 444550a7375SFelipe Balbi | csr); 445550a7375SFelipe Balbi 446550a7375SFelipe Balbi /* ensure writebuffer is empty */ 447550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 448550a7375SFelipe Balbi 449550a7375SFelipe Balbi /* NOTE host side sets DMAENAB later than this; both are 450550a7375SFelipe Balbi * OK since the transfer dma glue (between CPPI and Mentor 451550a7375SFelipe Balbi * fifos) just tells CPPI it could start. Data only moves 452550a7375SFelipe Balbi * to the USB TX fifo when both fifos are ready. 453550a7375SFelipe Balbi */ 454550a7375SFelipe Balbi 455550a7375SFelipe Balbi /* "mode" is irrelevant here; handle terminating ZLPs like 456550a7375SFelipe Balbi * PIO does, since the hardware RNDIS mode seems unreliable 457550a7375SFelipe Balbi * except for the last-packet-is-already-short case. 458550a7375SFelipe Balbi */ 459550a7375SFelipe Balbi use_dma = use_dma && c->channel_program( 460550a7375SFelipe Balbi musb_ep->dma, musb_ep->packet_sz, 461550a7375SFelipe Balbi 0, 46266af83ddSMing Lei request->dma + request->actual, 46366af83ddSMing Lei request_size); 464550a7375SFelipe Balbi if (!use_dma) { 465550a7375SFelipe Balbi c->channel_release(musb_ep->dma); 466550a7375SFelipe Balbi musb_ep->dma = NULL; 467b6e434a5SSergei Shtylyov csr &= ~MUSB_TXCSR_DMAENAB; 468b6e434a5SSergei Shtylyov musb_writew(epio, MUSB_TXCSR, csr); 469550a7375SFelipe Balbi /* invariant: prequest->buf is non-null */ 470550a7375SFelipe Balbi } 471550a7375SFelipe Balbi #elif defined(CONFIG_USB_TUSB_OMAP_DMA) 472550a7375SFelipe Balbi use_dma = use_dma && c->channel_program( 473550a7375SFelipe Balbi musb_ep->dma, musb_ep->packet_sz, 474550a7375SFelipe Balbi request->zero, 47566af83ddSMing Lei request->dma + request->actual, 47666af83ddSMing Lei request_size); 477550a7375SFelipe Balbi #endif 478550a7375SFelipe Balbi } 479550a7375SFelipe Balbi #endif 480550a7375SFelipe Balbi 481550a7375SFelipe Balbi if (!use_dma) { 48292d2711fSHema Kalliguddi /* 48392d2711fSHema 
Kalliguddi * Unmap the dma buffer back to cpu if dma channel 48492d2711fSHema Kalliguddi * programming fails 48592d2711fSHema Kalliguddi */ 48692d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 48792d2711fSHema Kalliguddi 488550a7375SFelipe Balbi musb_write_fifo(musb_ep->hw_ep, fifo_count, 489550a7375SFelipe Balbi (u8 *) (request->buf + request->actual)); 490550a7375SFelipe Balbi request->actual += fifo_count; 491550a7375SFelipe Balbi csr |= MUSB_TXCSR_TXPKTRDY; 492550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_P_UNDERRUN; 493550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 494550a7375SFelipe Balbi } 495550a7375SFelipe Balbi 496550a7375SFelipe Balbi /* host may already have the data when this message shows... */ 4975c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", 498550a7375SFelipe Balbi musb_ep->end_point.name, use_dma ? "dma" : "pio", 499550a7375SFelipe Balbi request->actual, request->length, 500550a7375SFelipe Balbi musb_readw(epio, MUSB_TXCSR), 501550a7375SFelipe Balbi fifo_count, 502550a7375SFelipe Balbi musb_readw(epio, MUSB_TXMAXP)); 503550a7375SFelipe Balbi } 504550a7375SFelipe Balbi 505550a7375SFelipe Balbi /* 506550a7375SFelipe Balbi * FIFO state update (e.g. data ready). 507550a7375SFelipe Balbi * Called from IRQ, with controller locked. 
508550a7375SFelipe Balbi */ 509550a7375SFelipe Balbi void musb_g_tx(struct musb *musb, u8 epnum) 510550a7375SFelipe Balbi { 511550a7375SFelipe Balbi u16 csr; 512ad1adb89SFelipe Balbi struct musb_request *req; 513550a7375SFelipe Balbi struct usb_request *request; 514550a7375SFelipe Balbi u8 __iomem *mbase = musb->mregs; 515550a7375SFelipe Balbi struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; 516550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 517550a7375SFelipe Balbi struct dma_channel *dma; 518550a7375SFelipe Balbi 519550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 520ad1adb89SFelipe Balbi req = next_request(musb_ep); 521ad1adb89SFelipe Balbi request = &req->request; 522550a7375SFelipe Balbi 523550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 5245c8a86e1SFelipe Balbi dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); 525550a7375SFelipe Balbi 526550a7375SFelipe Balbi dma = is_dma_capable() ? musb_ep->dma : NULL; 5277723de7eSSergei Shtylyov 5287723de7eSSergei Shtylyov /* 5297723de7eSSergei Shtylyov * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX 5307723de7eSSergei Shtylyov * probably rates reporting as a host error. 531550a7375SFelipe Balbi */ 532550a7375SFelipe Balbi if (csr & MUSB_TXCSR_P_SENTSTALL) { 533550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS; 534550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_P_SENTSTALL; 535550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 5367723de7eSSergei Shtylyov return; 537550a7375SFelipe Balbi } 538550a7375SFelipe Balbi 539550a7375SFelipe Balbi if (csr & MUSB_TXCSR_P_UNDERRUN) { 5407723de7eSSergei Shtylyov /* We NAKed, no big deal... little reason to care. 
*/ 541550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS; 5427723de7eSSergei Shtylyov csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 543550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 5445c8a86e1SFelipe Balbi dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", 5455c8a86e1SFelipe Balbi epnum, request); 546550a7375SFelipe Balbi } 547550a7375SFelipe Balbi 548550a7375SFelipe Balbi if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 5497723de7eSSergei Shtylyov /* 5507723de7eSSergei Shtylyov * SHOULD NOT HAPPEN... has with CPPI though, after 551550a7375SFelipe Balbi * changing SENDSTALL (and other cases); harmless? 552550a7375SFelipe Balbi */ 5535c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); 5547723de7eSSergei Shtylyov return; 555550a7375SFelipe Balbi } 556550a7375SFelipe Balbi 557550a7375SFelipe Balbi if (request) { 558550a7375SFelipe Balbi u8 is_dma = 0; 559550a7375SFelipe Balbi 560550a7375SFelipe Balbi if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 561550a7375SFelipe Balbi is_dma = 1; 562550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS; 5637723de7eSSergei Shtylyov csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 564100d4a9dSMian Yousaf Kaukab MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); 565550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 5667723de7eSSergei Shtylyov /* Ensure writebuffer is empty. */ 567550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 568550a7375SFelipe Balbi request->actual += musb_ep->dma->actual_len; 5695c8a86e1SFelipe Balbi dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", 5707723de7eSSergei Shtylyov epnum, csr, musb_ep->dma->actual_len, request); 571550a7375SFelipe Balbi } 572550a7375SFelipe Balbi 5737723de7eSSergei Shtylyov /* 5747723de7eSSergei Shtylyov * First, maybe a terminating short packet. Some DMA 5757723de7eSSergei Shtylyov * engines might handle this by themselves. 
576550a7375SFelipe Balbi */ 5777723de7eSSergei Shtylyov if ((request->zero && request->length 578e7379aaaSMing Lei && (request->length % musb_ep->packet_sz == 0) 579e7379aaaSMing Lei && (request->actual == request->length)) 580a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) 5817723de7eSSergei Shtylyov || (is_dma && (!dma->desired_mode || 582550a7375SFelipe Balbi (request->actual & 583550a7375SFelipe Balbi (musb_ep->packet_sz - 1)))) 584550a7375SFelipe Balbi #endif 585550a7375SFelipe Balbi ) { 5867723de7eSSergei Shtylyov /* 5877723de7eSSergei Shtylyov * On DMA completion, FIFO may not be 5887723de7eSSergei Shtylyov * available yet... 589550a7375SFelipe Balbi */ 590550a7375SFelipe Balbi if (csr & MUSB_TXCSR_TXPKTRDY) 5917723de7eSSergei Shtylyov return; 592550a7375SFelipe Balbi 5935c8a86e1SFelipe Balbi dev_dbg(musb->controller, "sending zero pkt\n"); 5947723de7eSSergei Shtylyov musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE 595550a7375SFelipe Balbi | MUSB_TXCSR_TXPKTRDY); 596550a7375SFelipe Balbi request->zero = 0; 597550a7375SFelipe Balbi } 598550a7375SFelipe Balbi 599bb27bc2cSMing Lei if (request->actual == request->length) { 600550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 60139287076SSupriya Karanth /* 60239287076SSupriya Karanth * In the giveback function the MUSB lock is 60339287076SSupriya Karanth * released and acquired after sometime. During 60439287076SSupriya Karanth * this time period the INDEX register could get 60539287076SSupriya Karanth * changed by the gadget_queue function especially 60639287076SSupriya Karanth * on SMP systems. Reselect the INDEX to be sure 60739287076SSupriya Karanth * we are reading/modifying the right registers 60839287076SSupriya Karanth */ 60939287076SSupriya Karanth musb_ep_select(mbase, epnum); 610ad1adb89SFelipe Balbi req = musb_ep->desc ? 
next_request(musb_ep) : NULL; 611ad1adb89SFelipe Balbi if (!req) { 6125c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s idle now\n", 613550a7375SFelipe Balbi musb_ep->end_point.name); 6147723de7eSSergei Shtylyov return; 61595962a77SSergei Shtylyov } 616550a7375SFelipe Balbi } 617550a7375SFelipe Balbi 618ad1adb89SFelipe Balbi txstate(musb, req); 619550a7375SFelipe Balbi } 620550a7375SFelipe Balbi } 621550a7375SFelipe Balbi 622550a7375SFelipe Balbi /* ------------------------------------------------------------ */ 623550a7375SFelipe Balbi 624550a7375SFelipe Balbi #ifdef CONFIG_USB_INVENTRA_DMA 625550a7375SFelipe Balbi 626550a7375SFelipe Balbi /* Peripheral rx (OUT) using Mentor DMA works as follows: 627550a7375SFelipe Balbi - Only mode 0 is used. 628550a7375SFelipe Balbi 629550a7375SFelipe Balbi - Request is queued by the gadget class driver. 630550a7375SFelipe Balbi -> if queue was previously empty, rxstate() 631550a7375SFelipe Balbi 632550a7375SFelipe Balbi - Host sends OUT token which causes an endpoint interrupt 633550a7375SFelipe Balbi /\ -> RxReady 634550a7375SFelipe Balbi | -> if request queued, call rxstate 635550a7375SFelipe Balbi | /\ -> setup DMA 636550a7375SFelipe Balbi | | -> DMA interrupt on completion 637550a7375SFelipe Balbi | | -> RxReady 638550a7375SFelipe Balbi | | -> stop DMA 639550a7375SFelipe Balbi | | -> ack the read 640550a7375SFelipe Balbi | | -> if data recd = max expected 641550a7375SFelipe Balbi | | by the request, or host 642550a7375SFelipe Balbi | | sent a short packet, 643550a7375SFelipe Balbi | | complete the request, 644550a7375SFelipe Balbi | | and start the next one. 645550a7375SFelipe Balbi | |_____________________________________| 646550a7375SFelipe Balbi | else just wait for the host 647550a7375SFelipe Balbi | to send the next OUT token. 648550a7375SFelipe Balbi |__________________________________________________| 649550a7375SFelipe Balbi 650550a7375SFelipe Balbi * Non-Mentor DMA engines can of course work differently. 
651550a7375SFelipe Balbi */ 652550a7375SFelipe Balbi 653550a7375SFelipe Balbi #endif 654550a7375SFelipe Balbi 655550a7375SFelipe Balbi /* 656550a7375SFelipe Balbi * Context: controller locked, IRQs blocked, endpoint selected 657550a7375SFelipe Balbi */ 658550a7375SFelipe Balbi static void rxstate(struct musb *musb, struct musb_request *req) 659550a7375SFelipe Balbi { 660550a7375SFelipe Balbi const u8 epnum = req->epnum; 661550a7375SFelipe Balbi struct usb_request *request = &req->request; 662bd2e74d6SMing Lei struct musb_ep *musb_ep; 663550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 664f0443afdSSergei Shtylyov unsigned len = 0; 665f0443afdSSergei Shtylyov u16 fifo_count; 666cea83241SSergei Shtylyov u16 csr = musb_readw(epio, MUSB_RXCSR); 667bd2e74d6SMing Lei struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 6680ae52d54SAnand Gadiyar u8 use_mode_1; 669bd2e74d6SMing Lei 670bd2e74d6SMing Lei if (hw_ep->is_shared_fifo) 671bd2e74d6SMing Lei musb_ep = &hw_ep->ep_in; 672bd2e74d6SMing Lei else 673bd2e74d6SMing Lei musb_ep = &hw_ep->ep_out; 674bd2e74d6SMing Lei 675f0443afdSSergei Shtylyov fifo_count = musb_ep->packet_sz; 676550a7375SFelipe Balbi 677abf710e6SVikram Pandita /* Check if EP is disabled */ 678abf710e6SVikram Pandita if (!musb_ep->desc) { 679abf710e6SVikram Pandita dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", 680abf710e6SVikram Pandita musb_ep->end_point.name); 681abf710e6SVikram Pandita return; 682abf710e6SVikram Pandita } 683abf710e6SVikram Pandita 684cea83241SSergei Shtylyov /* We shouldn't get here while DMA is active, but we do... 
*/ 685cea83241SSergei Shtylyov if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 6865c8a86e1SFelipe Balbi dev_dbg(musb->controller, "DMA pending...\n"); 687cea83241SSergei Shtylyov return; 688cea83241SSergei Shtylyov } 689cea83241SSergei Shtylyov 690cea83241SSergei Shtylyov if (csr & MUSB_RXCSR_P_SENDSTALL) { 6915c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", 692cea83241SSergei Shtylyov musb_ep->end_point.name, csr); 693cea83241SSergei Shtylyov return; 694cea83241SSergei Shtylyov } 695550a7375SFelipe Balbi 696c65bfa62SMian Yousaf Kaukab if (is_cppi_enabled() && is_buffer_mapped(req)) { 697550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 698550a7375SFelipe Balbi struct dma_channel *channel = musb_ep->dma; 699550a7375SFelipe Balbi 700550a7375SFelipe Balbi /* NOTE: CPPI won't actually stop advancing the DMA 701550a7375SFelipe Balbi * queue after short packet transfers, so this is almost 702550a7375SFelipe Balbi * always going to run as IRQ-per-packet DMA so that 703550a7375SFelipe Balbi * faults will be handled correctly. 
704550a7375SFelipe Balbi */ 705550a7375SFelipe Balbi if (c->channel_program(channel, 706550a7375SFelipe Balbi musb_ep->packet_sz, 707550a7375SFelipe Balbi !request->short_not_ok, 708550a7375SFelipe Balbi request->dma + request->actual, 709550a7375SFelipe Balbi request->length - request->actual)) { 710550a7375SFelipe Balbi 711550a7375SFelipe Balbi /* make sure that if an rxpkt arrived after the irq, 712550a7375SFelipe Balbi * the cppi engine will be ready to take it as soon 713550a7375SFelipe Balbi * as DMA is enabled 714550a7375SFelipe Balbi */ 715550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_AUTOCLEAR 716550a7375SFelipe Balbi | MUSB_RXCSR_DMAMODE); 717550a7375SFelipe Balbi csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; 718550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 719550a7375SFelipe Balbi return; 720550a7375SFelipe Balbi } 721550a7375SFelipe Balbi } 722550a7375SFelipe Balbi 723550a7375SFelipe Balbi if (csr & MUSB_RXCSR_RXPKTRDY) { 724f0443afdSSergei Shtylyov fifo_count = musb_readw(epio, MUSB_RXCOUNT); 7250ae52d54SAnand Gadiyar 7260ae52d54SAnand Gadiyar /* 72700a89180SFelipe Balbi * Enable Mode 1 on RX transfers only when short_not_ok flag 72800a89180SFelipe Balbi * is set. 
Currently short_not_ok flag is set only from 72900a89180SFelipe Balbi * file_storage and f_mass_storage drivers 7300ae52d54SAnand Gadiyar */ 73100a89180SFelipe Balbi 73200a89180SFelipe Balbi if (request->short_not_ok && fifo_count == musb_ep->packet_sz) 7330ae52d54SAnand Gadiyar use_mode_1 = 1; 7340ae52d54SAnand Gadiyar else 7350ae52d54SAnand Gadiyar use_mode_1 = 0; 7360ae52d54SAnand Gadiyar 737550a7375SFelipe Balbi if (request->actual < request->length) { 738550a7375SFelipe Balbi #ifdef CONFIG_USB_INVENTRA_DMA 739c65bfa62SMian Yousaf Kaukab if (is_buffer_mapped(req)) { 740550a7375SFelipe Balbi struct dma_controller *c; 741550a7375SFelipe Balbi struct dma_channel *channel; 742550a7375SFelipe Balbi int use_dma = 0; 743660fa886SRoger Quadros int transfer_size; 744550a7375SFelipe Balbi 745550a7375SFelipe Balbi c = musb->dma_controller; 746550a7375SFelipe Balbi channel = musb_ep->dma; 747550a7375SFelipe Balbi 74800a89180SFelipe Balbi /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in 74900a89180SFelipe Balbi * mode 0 only. So we do not get endpoint interrupts due to DMA 75000a89180SFelipe Balbi * completion. We only get interrupts from DMA controller. 75100a89180SFelipe Balbi * 75200a89180SFelipe Balbi * We could operate in DMA mode 1 if we knew the size of the tranfer 75300a89180SFelipe Balbi * in advance. For mass storage class, request->length = what the host 75400a89180SFelipe Balbi * sends, so that'd work. But for pretty much everything else, 75500a89180SFelipe Balbi * request->length is routinely more than what the host sends. For 75600a89180SFelipe Balbi * most these gadgets, end of is signified either by a short packet, 75700a89180SFelipe Balbi * or filling the last byte of the buffer. (Sending extra data in 75800a89180SFelipe Balbi * that last pckate should trigger an overflow fault.) But in mode 1, 75900a89180SFelipe Balbi * we don't get DMA completion interrupt for short packets. 
76000a89180SFelipe Balbi * 76100a89180SFelipe Balbi * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), 76200a89180SFelipe Balbi * to get endpoint interrupt on every DMA req, but that didn't seem 76300a89180SFelipe Balbi * to work reliably. 76400a89180SFelipe Balbi * 76500a89180SFelipe Balbi * REVISIT an updated g_file_storage can set req->short_not_ok, which 76600a89180SFelipe Balbi * then becomes usable as a runtime "use mode 1" hint... 76700a89180SFelipe Balbi */ 76800a89180SFelipe Balbi 7690ae52d54SAnand Gadiyar /* Experimental: Mode1 works with mass storage use cases */ 7700ae52d54SAnand Gadiyar if (use_mode_1) { 7719001d80dSMing Lei csr |= MUSB_RXCSR_AUTOCLEAR; 7720ae52d54SAnand Gadiyar musb_writew(epio, MUSB_RXCSR, csr); 7730ae52d54SAnand Gadiyar csr |= MUSB_RXCSR_DMAENAB; 7740ae52d54SAnand Gadiyar musb_writew(epio, MUSB_RXCSR, csr); 775550a7375SFelipe Balbi 7760ae52d54SAnand Gadiyar /* 7770ae52d54SAnand Gadiyar * this special sequence (enabling and then 778550a7375SFelipe Balbi * disabling MUSB_RXCSR_DMAMODE) is required 779550a7375SFelipe Balbi * to get DMAReq to activate 780550a7375SFelipe Balbi */ 781550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 782550a7375SFelipe Balbi csr | MUSB_RXCSR_DMAMODE); 7830ae52d54SAnand Gadiyar musb_writew(epio, MUSB_RXCSR, csr); 7840ae52d54SAnand Gadiyar 785660fa886SRoger Quadros transfer_size = min(request->length - request->actual, 786660fa886SRoger Quadros channel->max_len); 787660fa886SRoger Quadros musb_ep->dma->desired_mode = 1; 788660fa886SRoger Quadros 7890ae52d54SAnand Gadiyar } else { 7909001d80dSMing Lei if (!musb_ep->hb_mult && 7919001d80dSMing Lei musb_ep->hw_ep->rx_double_buffered) 7929001d80dSMing Lei csr |= MUSB_RXCSR_AUTOCLEAR; 7930ae52d54SAnand Gadiyar csr |= MUSB_RXCSR_DMAENAB; 794550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 795550a7375SFelipe Balbi 7961018b4e4SMing Lei transfer_size = min(request->length - request->actual, 797f0443afdSSergei Shtylyov 
(unsigned)fifo_count); 798550a7375SFelipe Balbi musb_ep->dma->desired_mode = 0; 7990ae52d54SAnand Gadiyar } 800550a7375SFelipe Balbi 801550a7375SFelipe Balbi use_dma = c->channel_program( 802550a7375SFelipe Balbi channel, 803550a7375SFelipe Balbi musb_ep->packet_sz, 804550a7375SFelipe Balbi channel->desired_mode, 805550a7375SFelipe Balbi request->dma 806550a7375SFelipe Balbi + request->actual, 807550a7375SFelipe Balbi transfer_size); 808550a7375SFelipe Balbi 809550a7375SFelipe Balbi if (use_dma) 810550a7375SFelipe Balbi return; 811550a7375SFelipe Balbi } 812a48ff906SMian Yousaf Kaukab #elif defined(CONFIG_USB_UX500_DMA) 813a48ff906SMian Yousaf Kaukab if ((is_buffer_mapped(req)) && 814a48ff906SMian Yousaf Kaukab (request->actual < request->length)) { 815a48ff906SMian Yousaf Kaukab 816a48ff906SMian Yousaf Kaukab struct dma_controller *c; 817a48ff906SMian Yousaf Kaukab struct dma_channel *channel; 818a48ff906SMian Yousaf Kaukab int transfer_size = 0; 819a48ff906SMian Yousaf Kaukab 820a48ff906SMian Yousaf Kaukab c = musb->dma_controller; 821a48ff906SMian Yousaf Kaukab channel = musb_ep->dma; 822a48ff906SMian Yousaf Kaukab 823a48ff906SMian Yousaf Kaukab /* In case first packet is short */ 824f0443afdSSergei Shtylyov if (fifo_count < musb_ep->packet_sz) 825f0443afdSSergei Shtylyov transfer_size = fifo_count; 826a48ff906SMian Yousaf Kaukab else if (request->short_not_ok) 827a48ff906SMian Yousaf Kaukab transfer_size = min(request->length - 828a48ff906SMian Yousaf Kaukab request->actual, 829a48ff906SMian Yousaf Kaukab channel->max_len); 830a48ff906SMian Yousaf Kaukab else 831a48ff906SMian Yousaf Kaukab transfer_size = min(request->length - 832a48ff906SMian Yousaf Kaukab request->actual, 833f0443afdSSergei Shtylyov (unsigned)fifo_count); 834a48ff906SMian Yousaf Kaukab 835a48ff906SMian Yousaf Kaukab csr &= ~MUSB_RXCSR_DMAMODE; 836a48ff906SMian Yousaf Kaukab csr |= (MUSB_RXCSR_DMAENAB | 837a48ff906SMian Yousaf Kaukab MUSB_RXCSR_AUTOCLEAR); 838a48ff906SMian Yousaf Kaukab 
839a48ff906SMian Yousaf Kaukab musb_writew(epio, MUSB_RXCSR, csr); 840a48ff906SMian Yousaf Kaukab 841a48ff906SMian Yousaf Kaukab if (transfer_size <= musb_ep->packet_sz) { 842a48ff906SMian Yousaf Kaukab musb_ep->dma->desired_mode = 0; 843a48ff906SMian Yousaf Kaukab } else { 844a48ff906SMian Yousaf Kaukab musb_ep->dma->desired_mode = 1; 845a48ff906SMian Yousaf Kaukab /* Mode must be set after DMAENAB */ 846a48ff906SMian Yousaf Kaukab csr |= MUSB_RXCSR_DMAMODE; 847a48ff906SMian Yousaf Kaukab musb_writew(epio, MUSB_RXCSR, csr); 848a48ff906SMian Yousaf Kaukab } 849a48ff906SMian Yousaf Kaukab 850a48ff906SMian Yousaf Kaukab if (c->channel_program(channel, 851a48ff906SMian Yousaf Kaukab musb_ep->packet_sz, 852a48ff906SMian Yousaf Kaukab channel->desired_mode, 853a48ff906SMian Yousaf Kaukab request->dma 854a48ff906SMian Yousaf Kaukab + request->actual, 855a48ff906SMian Yousaf Kaukab transfer_size)) 856a48ff906SMian Yousaf Kaukab 857a48ff906SMian Yousaf Kaukab return; 858a48ff906SMian Yousaf Kaukab } 859550a7375SFelipe Balbi #endif /* Mentor's DMA */ 860550a7375SFelipe Balbi 861f0443afdSSergei Shtylyov len = request->length - request->actual; 8625c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", 863550a7375SFelipe Balbi musb_ep->end_point.name, 864f0443afdSSergei Shtylyov fifo_count, len, 865550a7375SFelipe Balbi musb_ep->packet_sz); 866550a7375SFelipe Balbi 867c2c96321SFelipe Balbi fifo_count = min_t(unsigned, len, fifo_count); 868550a7375SFelipe Balbi 869550a7375SFelipe Balbi #ifdef CONFIG_USB_TUSB_OMAP_DMA 870c65bfa62SMian Yousaf Kaukab if (tusb_dma_omap() && is_buffer_mapped(req)) { 871550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 872550a7375SFelipe Balbi struct dma_channel *channel = musb_ep->dma; 873550a7375SFelipe Balbi u32 dma_addr = request->dma + request->actual; 874550a7375SFelipe Balbi int ret; 875550a7375SFelipe Balbi 876550a7375SFelipe Balbi ret = c->channel_program(channel, 877550a7375SFelipe 
Balbi musb_ep->packet_sz, 878550a7375SFelipe Balbi channel->desired_mode, 879550a7375SFelipe Balbi dma_addr, 880550a7375SFelipe Balbi fifo_count); 881550a7375SFelipe Balbi if (ret) 882550a7375SFelipe Balbi return; 883550a7375SFelipe Balbi } 884550a7375SFelipe Balbi #endif 88592d2711fSHema Kalliguddi /* 88692d2711fSHema Kalliguddi * Unmap the dma buffer back to cpu if dma channel 88792d2711fSHema Kalliguddi * programming fails. This buffer is mapped if the 88892d2711fSHema Kalliguddi * channel allocation is successful 88992d2711fSHema Kalliguddi */ 890c65bfa62SMian Yousaf Kaukab if (is_buffer_mapped(req)) { 89192d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 89292d2711fSHema Kalliguddi 893e75df371SMing Lei /* 894e75df371SMing Lei * Clear DMAENAB and AUTOCLEAR for the 89592d2711fSHema Kalliguddi * PIO mode transfer 89692d2711fSHema Kalliguddi */ 897e75df371SMing Lei csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); 89892d2711fSHema Kalliguddi musb_writew(epio, MUSB_RXCSR, csr); 89992d2711fSHema Kalliguddi } 900550a7375SFelipe Balbi 901550a7375SFelipe Balbi musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) 902550a7375SFelipe Balbi (request->buf + request->actual)); 903550a7375SFelipe Balbi request->actual += fifo_count; 904550a7375SFelipe Balbi 905550a7375SFelipe Balbi /* REVISIT if we left anything in the fifo, flush 906550a7375SFelipe Balbi * it and report -EOVERFLOW 907550a7375SFelipe Balbi */ 908550a7375SFelipe Balbi 909550a7375SFelipe Balbi /* ack the read! 
*/ 910550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS; 911550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_RXPKTRDY; 912550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 913550a7375SFelipe Balbi } 914550a7375SFelipe Balbi } 915550a7375SFelipe Balbi 916550a7375SFelipe Balbi /* reach the end or short packet detected */ 917f0443afdSSergei Shtylyov if (request->actual == request->length || 918f0443afdSSergei Shtylyov fifo_count < musb_ep->packet_sz) 919550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 920550a7375SFelipe Balbi } 921550a7375SFelipe Balbi 922550a7375SFelipe Balbi /* 923550a7375SFelipe Balbi * Data ready for a request; called from IRQ 924550a7375SFelipe Balbi */ 925550a7375SFelipe Balbi void musb_g_rx(struct musb *musb, u8 epnum) 926550a7375SFelipe Balbi { 927550a7375SFelipe Balbi u16 csr; 928ad1adb89SFelipe Balbi struct musb_request *req; 929550a7375SFelipe Balbi struct usb_request *request; 930550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 931bd2e74d6SMing Lei struct musb_ep *musb_ep; 932550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 933550a7375SFelipe Balbi struct dma_channel *dma; 934bd2e74d6SMing Lei struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 935bd2e74d6SMing Lei 936bd2e74d6SMing Lei if (hw_ep->is_shared_fifo) 937bd2e74d6SMing Lei musb_ep = &hw_ep->ep_in; 938bd2e74d6SMing Lei else 939bd2e74d6SMing Lei musb_ep = &hw_ep->ep_out; 940550a7375SFelipe Balbi 941550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 942550a7375SFelipe Balbi 943ad1adb89SFelipe Balbi req = next_request(musb_ep); 944ad1adb89SFelipe Balbi if (!req) 9450abdc36fSMaulik Mankad return; 946550a7375SFelipe Balbi 947ad1adb89SFelipe Balbi request = &req->request; 948ad1adb89SFelipe Balbi 949550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 950550a7375SFelipe Balbi dma = is_dma_capable() ? 
musb_ep->dma : NULL; 951550a7375SFelipe Balbi 9525c8a86e1SFelipe Balbi dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, 953550a7375SFelipe Balbi csr, dma ? " (dma)" : "", request); 954550a7375SFelipe Balbi 955550a7375SFelipe Balbi if (csr & MUSB_RXCSR_P_SENTSTALL) { 956550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS; 957550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_P_SENTSTALL; 958550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 959cea83241SSergei Shtylyov return; 960550a7375SFelipe Balbi } 961550a7375SFelipe Balbi 962550a7375SFelipe Balbi if (csr & MUSB_RXCSR_P_OVERRUN) { 963550a7375SFelipe Balbi /* csr |= MUSB_RXCSR_P_WZC_BITS; */ 964550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_P_OVERRUN; 965550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 966550a7375SFelipe Balbi 9675c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); 96843467868SSergei Shtylyov if (request->status == -EINPROGRESS) 969550a7375SFelipe Balbi request->status = -EOVERFLOW; 970550a7375SFelipe Balbi } 971550a7375SFelipe Balbi if (csr & MUSB_RXCSR_INCOMPRX) { 972550a7375SFelipe Balbi /* REVISIT not necessarily an error */ 9735c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); 974550a7375SFelipe Balbi } 975550a7375SFelipe Balbi 976550a7375SFelipe Balbi if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 977550a7375SFelipe Balbi /* "should not happen"; likely RXPKTRDY pending for DMA */ 9785c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s busy, csr %04x\n", 979550a7375SFelipe Balbi musb_ep->end_point.name, csr); 980cea83241SSergei Shtylyov return; 981550a7375SFelipe Balbi } 982550a7375SFelipe Balbi 983550a7375SFelipe Balbi if (dma && (csr & MUSB_RXCSR_DMAENAB)) { 984550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_AUTOCLEAR 985550a7375SFelipe Balbi | MUSB_RXCSR_DMAENAB 986550a7375SFelipe Balbi | MUSB_RXCSR_DMAMODE); 987550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 
988550a7375SFelipe Balbi MUSB_RXCSR_P_WZC_BITS | csr); 989550a7375SFelipe Balbi 990550a7375SFelipe Balbi request->actual += musb_ep->dma->actual_len; 991550a7375SFelipe Balbi 9925c8a86e1SFelipe Balbi dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", 993550a7375SFelipe Balbi epnum, csr, 994550a7375SFelipe Balbi musb_readw(epio, MUSB_RXCSR), 995550a7375SFelipe Balbi musb_ep->dma->actual_len, request); 996550a7375SFelipe Balbi 997a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 998a48ff906SMian Yousaf Kaukab defined(CONFIG_USB_UX500_DMA) 999550a7375SFelipe Balbi /* Autoclear doesn't clear RxPktRdy for short packets */ 10009001d80dSMing Lei if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) 1001550a7375SFelipe Balbi || (dma->actual_len 1002550a7375SFelipe Balbi & (musb_ep->packet_sz - 1))) { 1003550a7375SFelipe Balbi /* ack the read! */ 1004550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_RXPKTRDY; 1005550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1006550a7375SFelipe Balbi } 1007550a7375SFelipe Balbi 1008550a7375SFelipe Balbi /* incomplete, and not short? wait for next IN packet */ 1009550a7375SFelipe Balbi if ((request->actual < request->length) 1010550a7375SFelipe Balbi && (musb_ep->dma->actual_len 10119001d80dSMing Lei == musb_ep->packet_sz)) { 10129001d80dSMing Lei /* In double buffer case, continue to unload fifo if 10139001d80dSMing Lei * there is Rx packet in FIFO. 
10149001d80dSMing Lei **/ 10159001d80dSMing Lei csr = musb_readw(epio, MUSB_RXCSR); 10169001d80dSMing Lei if ((csr & MUSB_RXCSR_RXPKTRDY) && 10179001d80dSMing Lei hw_ep->rx_double_buffered) 10189001d80dSMing Lei goto exit; 1019cea83241SSergei Shtylyov return; 10209001d80dSMing Lei } 1021550a7375SFelipe Balbi #endif 1022550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 102339287076SSupriya Karanth /* 102439287076SSupriya Karanth * In the giveback function the MUSB lock is 102539287076SSupriya Karanth * released and acquired after sometime. During 102639287076SSupriya Karanth * this time period the INDEX register could get 102739287076SSupriya Karanth * changed by the gadget_queue function especially 102839287076SSupriya Karanth * on SMP systems. Reselect the INDEX to be sure 102939287076SSupriya Karanth * we are reading/modifying the right registers 103039287076SSupriya Karanth */ 103139287076SSupriya Karanth musb_ep_select(mbase, epnum); 1032550a7375SFelipe Balbi 1033ad1adb89SFelipe Balbi req = next_request(musb_ep); 1034ad1adb89SFelipe Balbi if (!req) 1035cea83241SSergei Shtylyov return; 1036550a7375SFelipe Balbi } 1037a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 1038a48ff906SMian Yousaf Kaukab defined(CONFIG_USB_UX500_DMA) 10399001d80dSMing Lei exit: 1040bb324b08SAjay Kumar Gupta #endif 104143467868SSergei Shtylyov /* Analyze request */ 1042ad1adb89SFelipe Balbi rxstate(musb, req); 1043550a7375SFelipe Balbi } 1044550a7375SFelipe Balbi 1045550a7375SFelipe Balbi /* ------------------------------------------------------------ */ 1046550a7375SFelipe Balbi 1047550a7375SFelipe Balbi static int musb_gadget_enable(struct usb_ep *ep, 1048550a7375SFelipe Balbi const struct usb_endpoint_descriptor *desc) 1049550a7375SFelipe Balbi { 1050550a7375SFelipe Balbi unsigned long flags; 1051550a7375SFelipe Balbi struct musb_ep *musb_ep; 1052550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 1053550a7375SFelipe 
Balbi void __iomem *regs; 1054550a7375SFelipe Balbi struct musb *musb; 1055550a7375SFelipe Balbi void __iomem *mbase; 1056550a7375SFelipe Balbi u8 epnum; 1057550a7375SFelipe Balbi u16 csr; 1058550a7375SFelipe Balbi unsigned tmp; 1059550a7375SFelipe Balbi int status = -EINVAL; 1060550a7375SFelipe Balbi 1061550a7375SFelipe Balbi if (!ep || !desc) 1062550a7375SFelipe Balbi return -EINVAL; 1063550a7375SFelipe Balbi 1064550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 1065550a7375SFelipe Balbi hw_ep = musb_ep->hw_ep; 1066550a7375SFelipe Balbi regs = hw_ep->regs; 1067550a7375SFelipe Balbi musb = musb_ep->musb; 1068550a7375SFelipe Balbi mbase = musb->mregs; 1069550a7375SFelipe Balbi epnum = musb_ep->current_epnum; 1070550a7375SFelipe Balbi 1071550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1072550a7375SFelipe Balbi 1073550a7375SFelipe Balbi if (musb_ep->desc) { 1074550a7375SFelipe Balbi status = -EBUSY; 1075550a7375SFelipe Balbi goto fail; 1076550a7375SFelipe Balbi } 107796bcd090SJulia Lawall musb_ep->type = usb_endpoint_type(desc); 1078550a7375SFelipe Balbi 1079550a7375SFelipe Balbi /* check direction and (later) maxpacket size against endpoint */ 108096bcd090SJulia Lawall if (usb_endpoint_num(desc) != epnum) 1081550a7375SFelipe Balbi goto fail; 1082550a7375SFelipe Balbi 1083550a7375SFelipe Balbi /* REVISIT this rules out high bandwidth periodic transfers */ 108429cc8897SKuninori Morimoto tmp = usb_endpoint_maxp(desc); 1085f11d893dSMing Lei if (tmp & ~0x07ff) { 1086f11d893dSMing Lei int ok; 1087f11d893dSMing Lei 1088f11d893dSMing Lei if (usb_endpoint_dir_in(desc)) 1089f11d893dSMing Lei ok = musb->hb_iso_tx; 1090f11d893dSMing Lei else 1091f11d893dSMing Lei ok = musb->hb_iso_rx; 1092f11d893dSMing Lei 1093f11d893dSMing Lei if (!ok) { 10945c8a86e1SFelipe Balbi dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); 1095550a7375SFelipe Balbi goto fail; 1096f11d893dSMing Lei } 1097f11d893dSMing Lei musb_ep->hb_mult = (tmp >> 11) & 3; 1098f11d893dSMing 
Lei } else { 1099f11d893dSMing Lei musb_ep->hb_mult = 0; 1100f11d893dSMing Lei } 1101f11d893dSMing Lei 1102f11d893dSMing Lei musb_ep->packet_sz = tmp & 0x7ff; 1103f11d893dSMing Lei tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); 1104550a7375SFelipe Balbi 1105550a7375SFelipe Balbi /* enable the interrupts for the endpoint, set the endpoint 1106550a7375SFelipe Balbi * packet size (or fail), set the mode, clear the fifo 1107550a7375SFelipe Balbi */ 1108550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 110996bcd090SJulia Lawall if (usb_endpoint_dir_in(desc)) { 1110550a7375SFelipe Balbi 1111550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) 1112550a7375SFelipe Balbi musb_ep->is_in = 1; 1113550a7375SFelipe Balbi if (!musb_ep->is_in) 1114550a7375SFelipe Balbi goto fail; 1115f11d893dSMing Lei 1116f11d893dSMing Lei if (tmp > hw_ep->max_packet_sz_tx) { 11175c8a86e1SFelipe Balbi dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); 1118550a7375SFelipe Balbi goto fail; 1119f11d893dSMing Lei } 1120550a7375SFelipe Balbi 1121b18d26f6SSebastian Andrzej Siewior musb->intrtxe |= (1 << epnum); 1122b18d26f6SSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); 1123550a7375SFelipe Balbi 1124550a7375SFelipe Balbi /* REVISIT if can_bulk_split(), use by updating "tmp"; 1125550a7375SFelipe Balbi * likewise high bandwidth periodic tx 1126550a7375SFelipe Balbi */ 11279f445cb2SCliff Cai /* Set TXMAXP with the FIFO size of the endpoint 112831c9909bSMing Lei * to disable double buffering mode. 
11299f445cb2SCliff Cai */ 1130bb3a2ef2Ssupriya karanth if (musb->double_buffer_not_ok) { 113106624818SFelipe Balbi musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); 1132bb3a2ef2Ssupriya karanth } else { 1133bb3a2ef2Ssupriya karanth if (can_bulk_split(musb, musb_ep->type)) 1134bb3a2ef2Ssupriya karanth musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / 1135bb3a2ef2Ssupriya karanth musb_ep->packet_sz) - 1; 113606624818SFelipe Balbi musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz 113706624818SFelipe Balbi | (musb_ep->hb_mult << 11)); 1138bb3a2ef2Ssupriya karanth } 1139550a7375SFelipe Balbi 1140550a7375SFelipe Balbi csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 1141550a7375SFelipe Balbi if (musb_readw(regs, MUSB_TXCSR) 1142550a7375SFelipe Balbi & MUSB_TXCSR_FIFONOTEMPTY) 1143550a7375SFelipe Balbi csr |= MUSB_TXCSR_FLUSHFIFO; 1144550a7375SFelipe Balbi if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1145550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_ISO; 1146550a7375SFelipe Balbi 1147550a7375SFelipe Balbi /* set twice in case of double buffering */ 1148550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1149550a7375SFelipe Balbi /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... 
*/ 1150550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1151550a7375SFelipe Balbi 1152550a7375SFelipe Balbi } else { 1153550a7375SFelipe Balbi 1154550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) 1155550a7375SFelipe Balbi musb_ep->is_in = 0; 1156550a7375SFelipe Balbi if (musb_ep->is_in) 1157550a7375SFelipe Balbi goto fail; 1158f11d893dSMing Lei 1159f11d893dSMing Lei if (tmp > hw_ep->max_packet_sz_rx) { 11605c8a86e1SFelipe Balbi dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); 1161550a7375SFelipe Balbi goto fail; 1162f11d893dSMing Lei } 1163550a7375SFelipe Balbi 1164af5ec14dSSebastian Andrzej Siewior musb->intrrxe |= (1 << epnum); 1165af5ec14dSSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); 1166550a7375SFelipe Balbi 1167550a7375SFelipe Balbi /* REVISIT if can_bulk_combine() use by updating "tmp" 1168550a7375SFelipe Balbi * likewise high bandwidth periodic rx 1169550a7375SFelipe Balbi */ 11709f445cb2SCliff Cai /* Set RXMAXP with the FIFO size of the endpoint 11719f445cb2SCliff Cai * to disable double buffering mode. 
11729f445cb2SCliff Cai */ 117306624818SFelipe Balbi if (musb->double_buffer_not_ok) 117406624818SFelipe Balbi musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); 117506624818SFelipe Balbi else 117606624818SFelipe Balbi musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz 117706624818SFelipe Balbi | (musb_ep->hb_mult << 11)); 1178550a7375SFelipe Balbi 1179550a7375SFelipe Balbi /* force shared fifo to OUT-only mode */ 1180550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) { 1181550a7375SFelipe Balbi csr = musb_readw(regs, MUSB_TXCSR); 1182550a7375SFelipe Balbi csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); 1183550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1184550a7375SFelipe Balbi } 1185550a7375SFelipe Balbi 1186550a7375SFelipe Balbi csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; 1187550a7375SFelipe Balbi if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1188550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_ISO; 1189550a7375SFelipe Balbi else if (musb_ep->type == USB_ENDPOINT_XFER_INT) 1190550a7375SFelipe Balbi csr |= MUSB_RXCSR_DISNYET; 1191550a7375SFelipe Balbi 1192550a7375SFelipe Balbi /* set twice in case of double buffering */ 1193550a7375SFelipe Balbi musb_writew(regs, MUSB_RXCSR, csr); 1194550a7375SFelipe Balbi musb_writew(regs, MUSB_RXCSR, csr); 1195550a7375SFelipe Balbi } 1196550a7375SFelipe Balbi 1197550a7375SFelipe Balbi /* NOTE: all the I/O code _should_ work fine without DMA, in case 1198550a7375SFelipe Balbi * for some reason you run out of channels here. 
1199550a7375SFelipe Balbi */ 1200550a7375SFelipe Balbi if (is_dma_capable() && musb->dma_controller) { 1201550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 1202550a7375SFelipe Balbi 1203550a7375SFelipe Balbi musb_ep->dma = c->channel_alloc(c, hw_ep, 1204550a7375SFelipe Balbi (desc->bEndpointAddress & USB_DIR_IN)); 1205550a7375SFelipe Balbi } else 1206550a7375SFelipe Balbi musb_ep->dma = NULL; 1207550a7375SFelipe Balbi 1208550a7375SFelipe Balbi musb_ep->desc = desc; 1209550a7375SFelipe Balbi musb_ep->busy = 0; 121047e97605SSergei Shtylyov musb_ep->wedged = 0; 1211550a7375SFelipe Balbi status = 0; 1212550a7375SFelipe Balbi 1213550a7375SFelipe Balbi pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", 1214550a7375SFelipe Balbi musb_driver_name, musb_ep->end_point.name, 1215550a7375SFelipe Balbi ({ char *s; switch (musb_ep->type) { 1216550a7375SFelipe Balbi case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; 1217550a7375SFelipe Balbi case USB_ENDPOINT_XFER_INT: s = "int"; break; 1218550a7375SFelipe Balbi default: s = "iso"; break; 1219550a7375SFelipe Balbi }; s; }), 1220550a7375SFelipe Balbi musb_ep->is_in ? "IN" : "OUT", 1221550a7375SFelipe Balbi musb_ep->dma ? "dma, " : "", 1222550a7375SFelipe Balbi musb_ep->packet_sz); 1223550a7375SFelipe Balbi 1224550a7375SFelipe Balbi schedule_work(&musb->irq_work); 1225550a7375SFelipe Balbi 1226550a7375SFelipe Balbi fail: 1227550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1228550a7375SFelipe Balbi return status; 1229550a7375SFelipe Balbi } 1230550a7375SFelipe Balbi 1231550a7375SFelipe Balbi /* 1232550a7375SFelipe Balbi * Disable an endpoint flushing all requests queued. 
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes and mask the per-endpoint interrupt;
	 * the cached intrtxe/intrrxe copy is kept in sync with the
	 * hardware register so later writes don't re-enable this ep
	 */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* clearing desc marks the endpoint "down"; musb_gadget_queue()
	 * checks it and refuses new requests with -ESHUTDOWN
	 */
	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 *
 * Returns NULL on allocation failure; the request is freed with
 * musb_free_request().  request.dma starts out as DMA_ADDR_INVALID
 * so map_dma_buffer() can tell an unmapped buffer apart later.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
1303550a7375SFelipe Balbi */ 1304550a7375SFelipe Balbi void musb_free_request(struct usb_ep *ep, struct usb_request *req) 1305550a7375SFelipe Balbi { 1306550a7375SFelipe Balbi kfree(to_musb_request(req)); 1307550a7375SFelipe Balbi } 1308550a7375SFelipe Balbi 1309550a7375SFelipe Balbi static LIST_HEAD(buffers); 1310550a7375SFelipe Balbi 1311550a7375SFelipe Balbi struct free_record { 1312550a7375SFelipe Balbi struct list_head list; 1313550a7375SFelipe Balbi struct device *dev; 1314550a7375SFelipe Balbi unsigned bytes; 1315550a7375SFelipe Balbi dma_addr_t dma; 1316550a7375SFelipe Balbi }; 1317550a7375SFelipe Balbi 1318550a7375SFelipe Balbi /* 1319550a7375SFelipe Balbi * Context: controller locked, IRQs blocked. 1320550a7375SFelipe Balbi */ 1321a666e3e6SSergei Shtylyov void musb_ep_restart(struct musb *musb, struct musb_request *req) 1322550a7375SFelipe Balbi { 13235c8a86e1SFelipe Balbi dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", 1324550a7375SFelipe Balbi req->tx ? "TX/IN" : "RX/OUT", 1325550a7375SFelipe Balbi &req->request, req->request.length, req->epnum); 1326550a7375SFelipe Balbi 1327550a7375SFelipe Balbi musb_ep_select(musb->mregs, req->epnum); 1328550a7375SFelipe Balbi if (req->tx) 1329550a7375SFelipe Balbi txstate(musb, req); 1330550a7375SFelipe Balbi else 1331550a7375SFelipe Balbi rxstate(musb, req); 1332550a7375SFelipe Balbi } 1333550a7375SFelipe Balbi 1334550a7375SFelipe Balbi static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1335550a7375SFelipe Balbi gfp_t gfp_flags) 1336550a7375SFelipe Balbi { 1337550a7375SFelipe Balbi struct musb_ep *musb_ep; 1338550a7375SFelipe Balbi struct musb_request *request; 1339550a7375SFelipe Balbi struct musb *musb; 1340550a7375SFelipe Balbi int status = 0; 1341550a7375SFelipe Balbi unsigned long lockflags; 1342550a7375SFelipe Balbi 1343550a7375SFelipe Balbi if (!ep || !req) 1344550a7375SFelipe Balbi return -EINVAL; 1345550a7375SFelipe Balbi if (!req->buf) 1346550a7375SFelipe Balbi 
return -ENODATA; 1347550a7375SFelipe Balbi 1348550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 1349550a7375SFelipe Balbi musb = musb_ep->musb; 1350550a7375SFelipe Balbi 1351550a7375SFelipe Balbi request = to_musb_request(req); 1352550a7375SFelipe Balbi request->musb = musb; 1353550a7375SFelipe Balbi 1354550a7375SFelipe Balbi if (request->ep != musb_ep) 1355550a7375SFelipe Balbi return -EINVAL; 1356550a7375SFelipe Balbi 13575c8a86e1SFelipe Balbi dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); 1358550a7375SFelipe Balbi 1359550a7375SFelipe Balbi /* request is mine now... */ 1360550a7375SFelipe Balbi request->request.actual = 0; 1361550a7375SFelipe Balbi request->request.status = -EINPROGRESS; 1362550a7375SFelipe Balbi request->epnum = musb_ep->current_epnum; 1363550a7375SFelipe Balbi request->tx = musb_ep->is_in; 1364550a7375SFelipe Balbi 1365c65bfa62SMian Yousaf Kaukab map_dma_buffer(request, musb, musb_ep); 1366550a7375SFelipe Balbi 1367550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, lockflags); 1368550a7375SFelipe Balbi 1369550a7375SFelipe Balbi /* don't queue if the ep is down */ 1370550a7375SFelipe Balbi if (!musb_ep->desc) { 13715c8a86e1SFelipe Balbi dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", 1372550a7375SFelipe Balbi req, ep->name, "disabled"); 1373550a7375SFelipe Balbi status = -ESHUTDOWN; 1374550a7375SFelipe Balbi goto cleanup; 1375550a7375SFelipe Balbi } 1376550a7375SFelipe Balbi 1377550a7375SFelipe Balbi /* add request to the list */ 1378ad1adb89SFelipe Balbi list_add_tail(&request->list, &musb_ep->req_list); 1379550a7375SFelipe Balbi 1380550a7375SFelipe Balbi /* it this is the head of the queue, start i/o ... 
*/ 1381ad1adb89SFelipe Balbi if (!musb_ep->busy && &request->list == musb_ep->req_list.next) 1382550a7375SFelipe Balbi musb_ep_restart(musb, request); 1383550a7375SFelipe Balbi 1384550a7375SFelipe Balbi cleanup: 1385550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, lockflags); 1386550a7375SFelipe Balbi return status; 1387550a7375SFelipe Balbi } 1388550a7375SFelipe Balbi 1389550a7375SFelipe Balbi static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) 1390550a7375SFelipe Balbi { 1391550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 13924cbbf084SFelipe Balbi struct musb_request *req = to_musb_request(request); 13934cbbf084SFelipe Balbi struct musb_request *r; 1394550a7375SFelipe Balbi unsigned long flags; 1395550a7375SFelipe Balbi int status = 0; 1396550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1397550a7375SFelipe Balbi 1398550a7375SFelipe Balbi if (!ep || !request || to_musb_request(request)->ep != musb_ep) 1399550a7375SFelipe Balbi return -EINVAL; 1400550a7375SFelipe Balbi 1401550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1402550a7375SFelipe Balbi 1403550a7375SFelipe Balbi list_for_each_entry(r, &musb_ep->req_list, list) { 14044cbbf084SFelipe Balbi if (r == req) 1405550a7375SFelipe Balbi break; 1406550a7375SFelipe Balbi } 14074cbbf084SFelipe Balbi if (r != req) { 14085c8a86e1SFelipe Balbi dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); 1409550a7375SFelipe Balbi status = -EINVAL; 1410550a7375SFelipe Balbi goto done; 1411550a7375SFelipe Balbi } 1412550a7375SFelipe Balbi 1413550a7375SFelipe Balbi /* if the hardware doesn't have the request, easy ... */ 14143d5ad13eSFelipe Balbi if (musb_ep->req_list.next != &req->list || musb_ep->busy) 1415550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1416550a7375SFelipe Balbi 1417550a7375SFelipe Balbi /* ... else abort the dma transfer ... 
*/ 1418550a7375SFelipe Balbi else if (is_dma_capable() && musb_ep->dma) { 1419550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 1420550a7375SFelipe Balbi 1421550a7375SFelipe Balbi musb_ep_select(musb->mregs, musb_ep->current_epnum); 1422550a7375SFelipe Balbi if (c->channel_abort) 1423550a7375SFelipe Balbi status = c->channel_abort(musb_ep->dma); 1424550a7375SFelipe Balbi else 1425550a7375SFelipe Balbi status = -EBUSY; 1426550a7375SFelipe Balbi if (status == 0) 1427550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1428550a7375SFelipe Balbi } else { 1429550a7375SFelipe Balbi /* NOTE: by sticking to easily tested hardware/driver states, 1430550a7375SFelipe Balbi * we leave counting of in-flight packets imprecise. 1431550a7375SFelipe Balbi */ 1432550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1433550a7375SFelipe Balbi } 1434550a7375SFelipe Balbi 1435550a7375SFelipe Balbi done: 1436550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1437550a7375SFelipe Balbi return status; 1438550a7375SFelipe Balbi } 1439550a7375SFelipe Balbi 1440550a7375SFelipe Balbi /* 1441550a7375SFelipe Balbi * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any 1442550a7375SFelipe Balbi * data but will queue requests. 
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	/* NOTE(review): musb_ep and friends are derived from ep before this
	 * NULL check; to_musb_ep() is only container_of() arithmetic so no
	 * dereference happens, but the check is largely decorative.
	 */
	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	/* isochronous endpoints never halt */
	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		/* refuse to stall while a transfer is in flight */
		if (request) {
			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
					ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n",
						ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		/* clearing halt also un-wedges the endpoint */
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits; the *_WZC_BITS masks keep
	 * the write-1-to-clear status bits from being clobbered
	 */
	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		dev_dbg(musb->controller, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Sets the halt feature with the clear requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	/* set_halt() only clears this flag on a successful "clear halt" */
	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}

/*
 * Report the number of bytes waiting in an OUT endpoint's FIFO.
 * Returns -EINVAL for IN or disabled endpoints.
 */
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb	*musb = musb_ep->musb;
		int		epnum = musb_ep->current_epnum;
		void __iomem	*mbase = musb->mregs;
		unsigned long	flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts: mask this ep in the hardware register only;
	 * the cached musb->intrtxe keeps the original mask for restore below
	 */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes controller
			 * to interrupt current FIFO loading, but not flushing
			 * the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		/* double write flushes a double-buffered FIFO */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

/* endpoint operations exposed through the gadget framework */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

/* Return the current USB frame number from the FRAME register. */
static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

/*
 * Initiate remote wakeup (B_PERIPHERAL + suspended) or SRP (B_IDLE),
 * depending on the current OTG state.  Returns 0 on success, -EINVAL
 * when the state doesn't permit a wakeup.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		/* busy-wait for the session bit to latch ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then for the SRP pulse to complete (bit clears) */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* drop the lock: otg_start_srp() may sleep */
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	/* signal resume on the bus for ~2ms, then stop driving it */
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

/* Connect or disconnect the D+ pullup (soft-connect bit in POWER). */
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

/* Forward a bus-power budget request to the transceiver, if it cares. */
static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return usb_phy_set_power(musb->xceiv, mA);
}

static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	pm_runtime_get_sync(musb->controller);

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
1766550a7375SFelipe Balbi */ 1767550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1768550a7375SFelipe Balbi if (is_on != musb->softconnect) { 1769550a7375SFelipe Balbi musb->softconnect = is_on; 1770550a7375SFelipe Balbi musb_pullup(musb, is_on); 1771550a7375SFelipe Balbi } 1772550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 177393e098a8SJohn Stultz 177493e098a8SJohn Stultz pm_runtime_put(musb->controller); 177593e098a8SJohn Stultz 1776550a7375SFelipe Balbi return 0; 1777550a7375SFelipe Balbi } 1778550a7375SFelipe Balbi 1779e71eb392SSebastian Andrzej Siewior static int musb_gadget_start(struct usb_gadget *g, 1780e71eb392SSebastian Andrzej Siewior struct usb_gadget_driver *driver); 1781e71eb392SSebastian Andrzej Siewior static int musb_gadget_stop(struct usb_gadget *g, 1782e71eb392SSebastian Andrzej Siewior struct usb_gadget_driver *driver); 17830f91349bSSebastian Andrzej Siewior 1784550a7375SFelipe Balbi static const struct usb_gadget_ops musb_gadget_operations = { 1785550a7375SFelipe Balbi .get_frame = musb_gadget_get_frame, 1786550a7375SFelipe Balbi .wakeup = musb_gadget_wakeup, 1787550a7375SFelipe Balbi .set_selfpowered = musb_gadget_set_self_powered, 1788550a7375SFelipe Balbi /* .vbus_session = musb_gadget_vbus_session, */ 1789550a7375SFelipe Balbi .vbus_draw = musb_gadget_vbus_draw, 1790550a7375SFelipe Balbi .pullup = musb_gadget_pullup, 1791e71eb392SSebastian Andrzej Siewior .udc_start = musb_gadget_start, 1792e71eb392SSebastian Andrzej Siewior .udc_stop = musb_gadget_stop, 1793550a7375SFelipe Balbi }; 1794550a7375SFelipe Balbi 1795550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1796550a7375SFelipe Balbi 1797550a7375SFelipe Balbi /* Registration */ 1798550a7375SFelipe Balbi 1799550a7375SFelipe Balbi /* Only this registration code "knows" the rule (from USB standards) 1800550a7375SFelipe Balbi * about there being only one external upstream port. 
 * It assumes
 * all peripheral ports are external...
 */

static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}


/*
 * Set up one struct musb_ep for the gadget side of hardware endpoint
 * @epnum, in direction @is_in, and link it into musb->g.ep_list
 * (except ep0, which becomes musb->g.ep0 with the ep0-specific ops).
 */
static void
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	/* ep0 and shared-FIFO endpoints get no direction suffix */
	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			/* one bidirectional ep object per shared FIFO */
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			/* split FIFOs: separate IN and OUT ep objects */
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = device_register(&musb->g.dev);
	if (status != 0) {
		/* device_register() failure: drop the implicit reference
		 * instead of unregistering
		 */
		put_device(&musb->g.dev);
		return status;
	}
	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	/* NULL parent tells musb_gadget_cleanup() registration failed */
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}

void musb_gadget_cleanup(struct musb *musb)
{
	usb_del_gadget_udc(&musb->g);
Andrzej Siewior if (musb->g.dev.parent) 1928550a7375SFelipe Balbi device_unregister(&musb->g.dev); 1929550a7375SFelipe Balbi } 1930550a7375SFelipe Balbi 1931550a7375SFelipe Balbi /* 1932550a7375SFelipe Balbi * Register the gadget driver. Used by gadget drivers when 1933550a7375SFelipe Balbi * registering themselves with the controller. 1934550a7375SFelipe Balbi * 1935550a7375SFelipe Balbi * -EINVAL something went wrong (not driver) 1936550a7375SFelipe Balbi * -EBUSY another gadget is already using the controller 1937b595076aSUwe Kleine-König * -ENOMEM no memory to perform the operation 1938550a7375SFelipe Balbi * 1939550a7375SFelipe Balbi * @param driver the gadget driver 1940550a7375SFelipe Balbi * @return <0 if error, 0 if everything is fine 1941550a7375SFelipe Balbi */ 1942e71eb392SSebastian Andrzej Siewior static int musb_gadget_start(struct usb_gadget *g, 1943e71eb392SSebastian Andrzej Siewior struct usb_gadget_driver *driver) 1944550a7375SFelipe Balbi { 1945e71eb392SSebastian Andrzej Siewior struct musb *musb = gadget_to_musb(g); 1946d445b6daSHeikki Krogerus struct usb_otg *otg = musb->xceiv->otg; 1947032ec49fSFelipe Balbi struct usb_hcd *hcd = musb_to_hcd(musb); 194863eed2b5SFelipe Balbi unsigned long flags; 1949032ec49fSFelipe Balbi int retval = 0; 1950550a7375SFelipe Balbi 1951032ec49fSFelipe Balbi if (driver->max_speed < USB_SPEED_HIGH) { 1952032ec49fSFelipe Balbi retval = -EINVAL; 1953032ec49fSFelipe Balbi goto err; 1954032ec49fSFelipe Balbi } 1955550a7375SFelipe Balbi 19567acc6197SHema HK pm_runtime_get_sync(musb->controller); 19577acc6197SHema HK 19585c8a86e1SFelipe Balbi dev_dbg(musb->controller, "registering driver %s\n", driver->function); 1959550a7375SFelipe Balbi 1960e71eb392SSebastian Andrzej Siewior musb->softconnect = 0; 1961550a7375SFelipe Balbi musb->gadget_driver = driver; 1962550a7375SFelipe Balbi 1963550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1964e71eb392SSebastian Andrzej Siewior musb->is_active = 1; 1965550a7375SFelipe 
Balbi 19666e13c650SHeikki Krogerus otg_set_peripheral(otg, &musb->g); 1967d4c433feSArnaud Mandy musb->xceiv->state = OTG_STATE_B_IDLE; 1968550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1969550a7375SFelipe Balbi 1970550a7375SFelipe Balbi /* REVISIT: funcall to other code, which also 1971550a7375SFelipe Balbi * handles power budgeting ... this way also 1972550a7375SFelipe Balbi * ensures HdrcStart is indirectly called. 1973550a7375SFelipe Balbi */ 1974032ec49fSFelipe Balbi retval = usb_add_hcd(hcd, 0, 0); 1975550a7375SFelipe Balbi if (retval < 0) { 19765c8a86e1SFelipe Balbi dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); 1977032ec49fSFelipe Balbi goto err; 19785f1e8ce7SHema HK } 1979002eda13SHema HK 1980002eda13SHema HK if ((musb->xceiv->last_event == USB_EVENT_ID) 1981d445b6daSHeikki Krogerus && otg->set_vbus) 19826e13c650SHeikki Krogerus otg_set_vbus(otg, 1); 1983550a7375SFelipe Balbi 198463eed2b5SFelipe Balbi hcd->self.uses_pio_for_control = 1; 1985032ec49fSFelipe Balbi 19867acc6197SHema HK if (musb->xceiv->last_event == USB_EVENT_NONE) 19877acc6197SHema HK pm_runtime_put(musb->controller); 19887acc6197SHema HK 198963eed2b5SFelipe Balbi return 0; 199063eed2b5SFelipe Balbi 1991032ec49fSFelipe Balbi err: 1992550a7375SFelipe Balbi return retval; 1993550a7375SFelipe Balbi } 1994550a7375SFelipe Balbi 1995550a7375SFelipe Balbi static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) 1996550a7375SFelipe Balbi { 1997550a7375SFelipe Balbi int i; 1998550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 1999550a7375SFelipe Balbi 2000550a7375SFelipe Balbi /* don't disconnect if it's not connected */ 2001550a7375SFelipe Balbi if (musb->g.speed == USB_SPEED_UNKNOWN) 2002550a7375SFelipe Balbi driver = NULL; 2003550a7375SFelipe Balbi else 2004550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 2005550a7375SFelipe Balbi 2006550a7375SFelipe Balbi /* deactivate the hardware */ 2007550a7375SFelipe Balbi if (musb->softconnect) { 
2008550a7375SFelipe Balbi musb->softconnect = 0; 2009550a7375SFelipe Balbi musb_pullup(musb, 0); 2010550a7375SFelipe Balbi } 2011550a7375SFelipe Balbi musb_stop(musb); 2012550a7375SFelipe Balbi 2013550a7375SFelipe Balbi /* killing any outstanding requests will quiesce the driver; 2014550a7375SFelipe Balbi * then report disconnect 2015550a7375SFelipe Balbi */ 2016550a7375SFelipe Balbi if (driver) { 2017550a7375SFelipe Balbi for (i = 0, hw_ep = musb->endpoints; 2018550a7375SFelipe Balbi i < musb->nr_endpoints; 2019550a7375SFelipe Balbi i++, hw_ep++) { 2020550a7375SFelipe Balbi musb_ep_select(musb->mregs, i); 2021550a7375SFelipe Balbi if (hw_ep->is_shared_fifo /* || !epnum */) { 2022550a7375SFelipe Balbi nuke(&hw_ep->ep_in, -ESHUTDOWN); 2023550a7375SFelipe Balbi } else { 2024550a7375SFelipe Balbi if (hw_ep->max_packet_sz_tx) 2025550a7375SFelipe Balbi nuke(&hw_ep->ep_in, -ESHUTDOWN); 2026550a7375SFelipe Balbi if (hw_ep->max_packet_sz_rx) 2027550a7375SFelipe Balbi nuke(&hw_ep->ep_out, -ESHUTDOWN); 2028550a7375SFelipe Balbi } 2029550a7375SFelipe Balbi } 2030550a7375SFelipe Balbi } 2031550a7375SFelipe Balbi } 2032550a7375SFelipe Balbi 2033550a7375SFelipe Balbi /* 2034550a7375SFelipe Balbi * Unregister the gadget driver. Used by gadget drivers when 2035550a7375SFelipe Balbi * unregistering themselves from the controller. 
2036550a7375SFelipe Balbi * 2037550a7375SFelipe Balbi * @param driver the gadget driver to unregister 2038550a7375SFelipe Balbi */ 2039e71eb392SSebastian Andrzej Siewior static int musb_gadget_stop(struct usb_gadget *g, 2040e71eb392SSebastian Andrzej Siewior struct usb_gadget_driver *driver) 2041550a7375SFelipe Balbi { 2042e71eb392SSebastian Andrzej Siewior struct musb *musb = gadget_to_musb(g); 204363eed2b5SFelipe Balbi unsigned long flags; 2044550a7375SFelipe Balbi 20457acc6197SHema HK if (musb->xceiv->last_event == USB_EVENT_NONE) 20467acc6197SHema HK pm_runtime_get_sync(musb->controller); 20477acc6197SHema HK 204863eed2b5SFelipe Balbi /* 204963eed2b5SFelipe Balbi * REVISIT always use otg_set_peripheral() here too; 2050550a7375SFelipe Balbi * this needs to shut down the OTG engine. 2051550a7375SFelipe Balbi */ 2052550a7375SFelipe Balbi 2053550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 2054550a7375SFelipe Balbi 2055550a7375SFelipe Balbi musb_hnp_stop(musb); 2056550a7375SFelipe Balbi 2057550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 0); 2058550a7375SFelipe Balbi 205984e250ffSDavid Brownell musb->xceiv->state = OTG_STATE_UNDEFINED; 2060550a7375SFelipe Balbi stop_activity(musb, driver); 20616e13c650SHeikki Krogerus otg_set_peripheral(musb->xceiv->otg, NULL); 2062550a7375SFelipe Balbi 20635c8a86e1SFelipe Balbi dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); 206463eed2b5SFelipe Balbi 2065550a7375SFelipe Balbi musb->is_active = 0; 2066550a7375SFelipe Balbi musb_platform_try_idle(musb, 0); 2067550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 2068550a7375SFelipe Balbi 2069550a7375SFelipe Balbi usb_remove_hcd(musb_to_hcd(musb)); 2070032ec49fSFelipe Balbi /* 2071032ec49fSFelipe Balbi * FIXME we need to be able to register another 2072550a7375SFelipe Balbi * gadget driver here and have everything work; 2073550a7375SFelipe Balbi * that currently misbehaves. 
2074550a7375SFelipe Balbi */ 207563eed2b5SFelipe Balbi 20767acc6197SHema HK pm_runtime_put(musb->controller); 20777acc6197SHema HK 207863eed2b5SFelipe Balbi return 0; 2079550a7375SFelipe Balbi } 2080550a7375SFelipe Balbi 2081550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 2082550a7375SFelipe Balbi 2083550a7375SFelipe Balbi /* lifecycle operations called through plat_uds.c */ 2084550a7375SFelipe Balbi 2085550a7375SFelipe Balbi void musb_g_resume(struct musb *musb) 2086550a7375SFelipe Balbi { 2087550a7375SFelipe Balbi musb->is_suspended = 0; 208884e250ffSDavid Brownell switch (musb->xceiv->state) { 2089550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 2090550a7375SFelipe Balbi break; 2091550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 2092550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 2093550a7375SFelipe Balbi musb->is_active = 1; 2094550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->resume) { 2095550a7375SFelipe Balbi spin_unlock(&musb->lock); 2096550a7375SFelipe Balbi musb->gadget_driver->resume(&musb->g); 2097550a7375SFelipe Balbi spin_lock(&musb->lock); 2098550a7375SFelipe Balbi } 2099550a7375SFelipe Balbi break; 2100550a7375SFelipe Balbi default: 2101550a7375SFelipe Balbi WARNING("unhandled RESUME transition (%s)\n", 21023df00453SAnatolij Gustschin otg_state_string(musb->xceiv->state)); 2103550a7375SFelipe Balbi } 2104550a7375SFelipe Balbi } 2105550a7375SFelipe Balbi 2106550a7375SFelipe Balbi /* called when SOF packets stop for 3+ msec */ 2107550a7375SFelipe Balbi void musb_g_suspend(struct musb *musb) 2108550a7375SFelipe Balbi { 2109550a7375SFelipe Balbi u8 devctl; 2110550a7375SFelipe Balbi 2111550a7375SFelipe Balbi devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 21125c8a86e1SFelipe Balbi dev_dbg(musb->controller, "devctl %02x\n", devctl); 2113550a7375SFelipe Balbi 211484e250ffSDavid Brownell switch (musb->xceiv->state) { 2115550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 2116550a7375SFelipe 
Balbi if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 211784e250ffSDavid Brownell musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 2118550a7375SFelipe Balbi break; 2119550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 2120550a7375SFelipe Balbi musb->is_suspended = 1; 2121550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->suspend) { 2122550a7375SFelipe Balbi spin_unlock(&musb->lock); 2123550a7375SFelipe Balbi musb->gadget_driver->suspend(&musb->g); 2124550a7375SFelipe Balbi spin_lock(&musb->lock); 2125550a7375SFelipe Balbi } 2126550a7375SFelipe Balbi break; 2127550a7375SFelipe Balbi default: 2128550a7375SFelipe Balbi /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 2129550a7375SFelipe Balbi * A_PERIPHERAL may need care too 2130550a7375SFelipe Balbi */ 2131550a7375SFelipe Balbi WARNING("unhandled SUSPEND transition (%s)\n", 21323df00453SAnatolij Gustschin otg_state_string(musb->xceiv->state)); 2133550a7375SFelipe Balbi } 2134550a7375SFelipe Balbi } 2135550a7375SFelipe Balbi 2136550a7375SFelipe Balbi /* Called during SRP */ 2137550a7375SFelipe Balbi void musb_g_wakeup(struct musb *musb) 2138550a7375SFelipe Balbi { 2139550a7375SFelipe Balbi musb_gadget_wakeup(&musb->g); 2140550a7375SFelipe Balbi } 2141550a7375SFelipe Balbi 2142550a7375SFelipe Balbi /* called when VBUS drops below session threshold, and in other cases */ 2143550a7375SFelipe Balbi void musb_g_disconnect(struct musb *musb) 2144550a7375SFelipe Balbi { 2145550a7375SFelipe Balbi void __iomem *mregs = musb->mregs; 2146550a7375SFelipe Balbi u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 2147550a7375SFelipe Balbi 21485c8a86e1SFelipe Balbi dev_dbg(musb->controller, "devctl %02x\n", devctl); 2149550a7375SFelipe Balbi 2150550a7375SFelipe Balbi /* clear HR */ 2151550a7375SFelipe Balbi musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 2152550a7375SFelipe Balbi 2153550a7375SFelipe Balbi /* don't draw vbus until new b-default session */ 2154550a7375SFelipe Balbi (void) 
musb_gadget_vbus_draw(&musb->g, 0); 2155550a7375SFelipe Balbi 2156550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 2157550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->disconnect) { 2158550a7375SFelipe Balbi spin_unlock(&musb->lock); 2159550a7375SFelipe Balbi musb->gadget_driver->disconnect(&musb->g); 2160550a7375SFelipe Balbi spin_lock(&musb->lock); 2161550a7375SFelipe Balbi } 2162550a7375SFelipe Balbi 216384e250ffSDavid Brownell switch (musb->xceiv->state) { 2164550a7375SFelipe Balbi default: 21655c8a86e1SFelipe Balbi dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", 21663df00453SAnatolij Gustschin otg_state_string(musb->xceiv->state)); 216784e250ffSDavid Brownell musb->xceiv->state = OTG_STATE_A_IDLE; 2168ab983f2aSDavid Brownell MUSB_HST_MODE(musb); 2169550a7375SFelipe Balbi break; 2170550a7375SFelipe Balbi case OTG_STATE_A_PERIPHERAL: 21711de00daeSDavid Brownell musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 2172ab983f2aSDavid Brownell MUSB_HST_MODE(musb); 2173550a7375SFelipe Balbi break; 2174550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 2175550a7375SFelipe Balbi case OTG_STATE_B_HOST: 2176550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 2177550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 217884e250ffSDavid Brownell musb->xceiv->state = OTG_STATE_B_IDLE; 2179550a7375SFelipe Balbi break; 2180550a7375SFelipe Balbi case OTG_STATE_B_SRP_INIT: 2181550a7375SFelipe Balbi break; 2182550a7375SFelipe Balbi } 2183550a7375SFelipe Balbi 2184550a7375SFelipe Balbi musb->is_active = 0; 2185550a7375SFelipe Balbi } 2186550a7375SFelipe Balbi 2187550a7375SFelipe Balbi void musb_g_reset(struct musb *musb) 2188550a7375SFelipe Balbi __releases(musb->lock) 2189550a7375SFelipe Balbi __acquires(musb->lock) 2190550a7375SFelipe Balbi { 2191550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 2192550a7375SFelipe Balbi u8 devctl = musb_readb(mbase, MUSB_DEVCTL); 2193550a7375SFelipe Balbi u8 power; 2194550a7375SFelipe Balbi 
2195515ba29cSSebastian Andrzej Siewior dev_dbg(musb->controller, "<== %s driver '%s'\n", 2196550a7375SFelipe Balbi (devctl & MUSB_DEVCTL_BDEVICE) 2197550a7375SFelipe Balbi ? "B-Device" : "A-Device", 2198550a7375SFelipe Balbi musb->gadget_driver 2199550a7375SFelipe Balbi ? musb->gadget_driver->driver.name 2200550a7375SFelipe Balbi : NULL 2201550a7375SFelipe Balbi ); 2202550a7375SFelipe Balbi 2203550a7375SFelipe Balbi /* report disconnect, if we didn't already (flushing EP state) */ 2204550a7375SFelipe Balbi if (musb->g.speed != USB_SPEED_UNKNOWN) 2205550a7375SFelipe Balbi musb_g_disconnect(musb); 2206550a7375SFelipe Balbi 2207550a7375SFelipe Balbi /* clear HR */ 2208550a7375SFelipe Balbi else if (devctl & MUSB_DEVCTL_HR) 2209550a7375SFelipe Balbi musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 2210550a7375SFelipe Balbi 2211550a7375SFelipe Balbi 2212550a7375SFelipe Balbi /* what speed did we negotiate? */ 2213550a7375SFelipe Balbi power = musb_readb(mbase, MUSB_POWER); 2214550a7375SFelipe Balbi musb->g.speed = (power & MUSB_POWER_HSMODE) 2215550a7375SFelipe Balbi ? 
USB_SPEED_HIGH : USB_SPEED_FULL; 2216550a7375SFelipe Balbi 2217550a7375SFelipe Balbi /* start in USB_STATE_DEFAULT */ 2218550a7375SFelipe Balbi musb->is_active = 1; 2219550a7375SFelipe Balbi musb->is_suspended = 0; 2220550a7375SFelipe Balbi MUSB_DEV_MODE(musb); 2221550a7375SFelipe Balbi musb->address = 0; 2222550a7375SFelipe Balbi musb->ep0_state = MUSB_EP0_STAGE_SETUP; 2223550a7375SFelipe Balbi 2224550a7375SFelipe Balbi musb->may_wakeup = 0; 2225550a7375SFelipe Balbi musb->g.b_hnp_enable = 0; 2226550a7375SFelipe Balbi musb->g.a_alt_hnp_support = 0; 2227550a7375SFelipe Balbi musb->g.a_hnp_support = 0; 2228550a7375SFelipe Balbi 2229550a7375SFelipe Balbi /* Normal reset, as B-Device; 2230550a7375SFelipe Balbi * or else after HNP, as A-Device 2231550a7375SFelipe Balbi */ 2232550a7375SFelipe Balbi if (devctl & MUSB_DEVCTL_BDEVICE) { 223384e250ffSDavid Brownell musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 2234550a7375SFelipe Balbi musb->g.is_a_peripheral = 0; 2235032ec49fSFelipe Balbi } else { 223684e250ffSDavid Brownell musb->xceiv->state = OTG_STATE_A_PERIPHERAL; 2237550a7375SFelipe Balbi musb->g.is_a_peripheral = 1; 2238032ec49fSFelipe Balbi } 2239550a7375SFelipe Balbi 2240550a7375SFelipe Balbi /* start with default limits on VBUS power draw */ 2241032ec49fSFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 8); 2242550a7375SFelipe Balbi } 2243