15fd54aceSGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2550a7375SFelipe Balbi /* 3550a7375SFelipe Balbi * MUSB OTG driver peripheral support 4550a7375SFelipe Balbi * 5550a7375SFelipe Balbi * Copyright 2005 Mentor Graphics Corporation 6550a7375SFelipe Balbi * Copyright (C) 2005-2006 by Texas Instruments 7550a7375SFelipe Balbi * Copyright (C) 2006-2007 Nokia Corporation 8cea83241SSergei Shtylyov * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> 9550a7375SFelipe Balbi */ 10550a7375SFelipe Balbi 11550a7375SFelipe Balbi #include <linux/kernel.h> 12550a7375SFelipe Balbi #include <linux/list.h> 13550a7375SFelipe Balbi #include <linux/timer.h> 14550a7375SFelipe Balbi #include <linux/module.h> 15550a7375SFelipe Balbi #include <linux/smp.h> 16550a7375SFelipe Balbi #include <linux/spinlock.h> 17550a7375SFelipe Balbi #include <linux/delay.h> 18550a7375SFelipe Balbi #include <linux/dma-mapping.h> 195a0e3ad6STejun Heo #include <linux/slab.h> 20550a7375SFelipe Balbi 21550a7375SFelipe Balbi #include "musb_core.h" 22fc78003eSBin Liu #include "musb_trace.h" 23550a7375SFelipe Balbi 24550a7375SFelipe Balbi 25550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 26550a7375SFelipe Balbi 27c65bfa62SMian Yousaf Kaukab #define is_buffer_mapped(req) (is_dma_capable() && \ 28c65bfa62SMian Yousaf Kaukab (req->map_state != UN_MAPPED)) 29c65bfa62SMian Yousaf Kaukab 3092d2711fSHema Kalliguddi /* Maps the buffer to dma */ 3192d2711fSHema Kalliguddi 3292d2711fSHema Kalliguddi static inline void map_dma_buffer(struct musb_request *request, 33c65bfa62SMian Yousaf Kaukab struct musb *musb, struct musb_ep *musb_ep) 3492d2711fSHema Kalliguddi { 355f5761cbSMian Yousaf Kaukab int compatible = true; 365f5761cbSMian Yousaf Kaukab struct dma_controller *dma = musb->dma_controller; 375f5761cbSMian Yousaf Kaukab 38c65bfa62SMian Yousaf Kaukab request->map_state = UN_MAPPED; 39c65bfa62SMian Yousaf Kaukab 40c65bfa62SMian Yousaf Kaukab if (!is_dma_capable() || !musb_ep->dma) 41c65bfa62SMian Yousaf Kaukab return; 42c65bfa62SMian Yousaf Kaukab 435f5761cbSMian Yousaf Kaukab /* Check if DMA engine can handle this request. 445f5761cbSMian Yousaf Kaukab * DMA code must reject the USB request explicitly. 455f5761cbSMian Yousaf Kaukab * Default behaviour is to map the request. 465f5761cbSMian Yousaf Kaukab */ 475f5761cbSMian Yousaf Kaukab if (dma->is_compatible) 485f5761cbSMian Yousaf Kaukab compatible = dma->is_compatible(musb_ep->dma, 495f5761cbSMian Yousaf Kaukab musb_ep->packet_sz, request->request.buf, 505f5761cbSMian Yousaf Kaukab request->request.length); 515f5761cbSMian Yousaf Kaukab if (!compatible) 525f5761cbSMian Yousaf Kaukab return; 535f5761cbSMian Yousaf Kaukab 5492d2711fSHema Kalliguddi if (request->request.dma == DMA_ADDR_INVALID) { 557b360f42SSebastian Andrzej Siewior dma_addr_t dma_addr; 567b360f42SSebastian Andrzej Siewior int ret; 577b360f42SSebastian Andrzej Siewior 587b360f42SSebastian Andrzej Siewior dma_addr = dma_map_single( 5992d2711fSHema Kalliguddi musb->controller, 6092d2711fSHema Kalliguddi request->request.buf, 6192d2711fSHema Kalliguddi request->request.length, 6292d2711fSHema Kalliguddi request->tx 6392d2711fSHema Kalliguddi ? DMA_TO_DEVICE 6492d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 657b360f42SSebastian Andrzej Siewior ret = dma_mapping_error(musb->controller, dma_addr); 667b360f42SSebastian Andrzej Siewior if (ret) 677b360f42SSebastian Andrzej Siewior return; 687b360f42SSebastian Andrzej Siewior 697b360f42SSebastian Andrzej Siewior request->request.dma = dma_addr; 70c65bfa62SMian Yousaf Kaukab request->map_state = MUSB_MAPPED; 7192d2711fSHema Kalliguddi } else { 7292d2711fSHema Kalliguddi dma_sync_single_for_device(musb->controller, 7392d2711fSHema Kalliguddi request->request.dma, 7492d2711fSHema Kalliguddi request->request.length, 7592d2711fSHema Kalliguddi request->tx 7692d2711fSHema Kalliguddi ? DMA_TO_DEVICE 7792d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 78c65bfa62SMian Yousaf Kaukab request->map_state = PRE_MAPPED; 7992d2711fSHema Kalliguddi } 8092d2711fSHema Kalliguddi } 8192d2711fSHema Kalliguddi 8292d2711fSHema Kalliguddi /* Unmap the buffer from dma and maps it back to cpu */ 8392d2711fSHema Kalliguddi static inline void unmap_dma_buffer(struct musb_request *request, 8492d2711fSHema Kalliguddi struct musb *musb) 8592d2711fSHema Kalliguddi { 8606d9db72SKishon Vijay Abraham I struct musb_ep *musb_ep = request->ep; 8706d9db72SKishon Vijay Abraham I 8806d9db72SKishon Vijay Abraham I if (!is_buffer_mapped(request) || !musb_ep->dma) 89c65bfa62SMian Yousaf Kaukab return; 90c65bfa62SMian Yousaf Kaukab 9192d2711fSHema Kalliguddi if (request->request.dma == DMA_ADDR_INVALID) { 925c8a86e1SFelipe Balbi dev_vdbg(musb->controller, 935c8a86e1SFelipe Balbi "not unmapping a never mapped buffer\n"); 9492d2711fSHema Kalliguddi return; 9592d2711fSHema Kalliguddi } 96c65bfa62SMian Yousaf Kaukab if (request->map_state == MUSB_MAPPED) { 9792d2711fSHema Kalliguddi dma_unmap_single(musb->controller, 9892d2711fSHema Kalliguddi request->request.dma, 9992d2711fSHema Kalliguddi request->request.length, 10092d2711fSHema Kalliguddi request->tx 10192d2711fSHema Kalliguddi ? DMA_TO_DEVICE 10292d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 10392d2711fSHema Kalliguddi request->request.dma = DMA_ADDR_INVALID; 104c65bfa62SMian Yousaf Kaukab } else { /* PRE_MAPPED */ 10592d2711fSHema Kalliguddi dma_sync_single_for_cpu(musb->controller, 10692d2711fSHema Kalliguddi request->request.dma, 10792d2711fSHema Kalliguddi request->request.length, 10892d2711fSHema Kalliguddi request->tx 10992d2711fSHema Kalliguddi ? DMA_TO_DEVICE 11092d2711fSHema Kalliguddi : DMA_FROM_DEVICE); 11192d2711fSHema Kalliguddi } 112c65bfa62SMian Yousaf Kaukab request->map_state = UN_MAPPED; 11392d2711fSHema Kalliguddi } 11492d2711fSHema Kalliguddi 115550a7375SFelipe Balbi /* 116550a7375SFelipe Balbi * Immediately complete a request. 117550a7375SFelipe Balbi * 118550a7375SFelipe Balbi * @param request the request to complete 119550a7375SFelipe Balbi * @param status the status to complete the request with 120550a7375SFelipe Balbi * Context: controller locked, IRQs blocked. 121550a7375SFelipe Balbi */ 122550a7375SFelipe Balbi void musb_g_giveback( 123550a7375SFelipe Balbi struct musb_ep *ep, 124550a7375SFelipe Balbi struct usb_request *request, 125550a7375SFelipe Balbi int status) 126550a7375SFelipe Balbi __releases(ep->musb->lock) 127550a7375SFelipe Balbi __acquires(ep->musb->lock) 128550a7375SFelipe Balbi { 129550a7375SFelipe Balbi struct musb_request *req; 130550a7375SFelipe Balbi struct musb *musb; 131550a7375SFelipe Balbi int busy = ep->busy; 132550a7375SFelipe Balbi 133550a7375SFelipe Balbi req = to_musb_request(request); 134550a7375SFelipe Balbi 135ad1adb89SFelipe Balbi list_del(&req->list); 136550a7375SFelipe Balbi if (req->request.status == -EINPROGRESS) 137550a7375SFelipe Balbi req->request.status = status; 138550a7375SFelipe Balbi musb = req->musb; 139550a7375SFelipe Balbi 140550a7375SFelipe Balbi ep->busy = 1; 141550a7375SFelipe Balbi spin_unlock(&musb->lock); 14206d9db72SKishon Vijay Abraham I 14306d9db72SKishon Vijay Abraham I if (!dma_mapping_error(&musb->g.dev, request->dma)) 14492d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 14506d9db72SKishon Vijay Abraham I 146fc78003eSBin Liu trace_musb_req_gb(req); 147304f7e5eSMichal Sojka usb_gadget_giveback_request(&req->ep->end_point, &req->request); 148550a7375SFelipe Balbi spin_lock(&musb->lock); 149550a7375SFelipe Balbi ep->busy = busy; 150550a7375SFelipe Balbi } 151550a7375SFelipe Balbi 152550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 153550a7375SFelipe Balbi 154550a7375SFelipe Balbi /* 155550a7375SFelipe Balbi * Abort requests queued to an endpoint using the status. Synchronous. 156550a7375SFelipe Balbi * caller locked controller and blocked irqs, and selected this ep. 157550a7375SFelipe Balbi */ 158550a7375SFelipe Balbi static void nuke(struct musb_ep *ep, const int status) 159550a7375SFelipe Balbi { 1605c8a86e1SFelipe Balbi struct musb *musb = ep->musb; 161550a7375SFelipe Balbi struct musb_request *req = NULL; 162550a7375SFelipe Balbi void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; 163550a7375SFelipe Balbi 164550a7375SFelipe Balbi ep->busy = 1; 165550a7375SFelipe Balbi 166550a7375SFelipe Balbi if (is_dma_capable() && ep->dma) { 167550a7375SFelipe Balbi struct dma_controller *c = ep->musb->dma_controller; 168550a7375SFelipe Balbi int value; 169b6e434a5SSergei Shtylyov 170550a7375SFelipe Balbi if (ep->is_in) { 171b6e434a5SSergei Shtylyov /* 172b6e434a5SSergei Shtylyov * The programming guide says that we must not clear 173b6e434a5SSergei Shtylyov * the DMAMODE bit before DMAENAB, so we only 174b6e434a5SSergei Shtylyov * clear it in the second write... 175b6e434a5SSergei Shtylyov */ 176550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, 177b6e434a5SSergei Shtylyov MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); 178550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, 179550a7375SFelipe Balbi 0 | MUSB_TXCSR_FLUSHFIFO); 180550a7375SFelipe Balbi } else { 181550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 182550a7375SFelipe Balbi 0 | MUSB_RXCSR_FLUSHFIFO); 183550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 184550a7375SFelipe Balbi 0 | MUSB_RXCSR_FLUSHFIFO); 185550a7375SFelipe Balbi } 186550a7375SFelipe Balbi 187550a7375SFelipe Balbi value = c->channel_abort(ep->dma); 188b99d3659SBin Liu musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value); 189550a7375SFelipe Balbi c->channel_release(ep->dma); 190550a7375SFelipe Balbi ep->dma = NULL; 191550a7375SFelipe Balbi } 192550a7375SFelipe Balbi 193ad1adb89SFelipe Balbi while (!list_empty(&ep->req_list)) { 194ad1adb89SFelipe Balbi req = list_first_entry(&ep->req_list, struct musb_request, list); 195550a7375SFelipe Balbi musb_g_giveback(ep, &req->request, status); 196550a7375SFelipe Balbi } 197550a7375SFelipe Balbi } 198550a7375SFelipe Balbi 199550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 200550a7375SFelipe Balbi 201550a7375SFelipe Balbi /* Data transfers - pure PIO, pure DMA, or mixed mode */ 202550a7375SFelipe Balbi 203550a7375SFelipe Balbi /* 204550a7375SFelipe Balbi * This assumes the separate CPPI engine is responding to DMA requests 205550a7375SFelipe Balbi * from the usb core ... sequenced a bit differently from mentor dma. 206550a7375SFelipe Balbi */ 207550a7375SFelipe Balbi 208550a7375SFelipe Balbi static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) 209550a7375SFelipe Balbi { 210550a7375SFelipe Balbi if (can_bulk_split(musb, ep->type)) 211550a7375SFelipe Balbi return ep->hw_ep->max_packet_sz_tx; 212550a7375SFelipe Balbi else 213550a7375SFelipe Balbi return ep->packet_sz; 214550a7375SFelipe Balbi } 215550a7375SFelipe Balbi 216550a7375SFelipe Balbi /* 217550a7375SFelipe Balbi * An endpoint is transmitting data. This can be called either from 218550a7375SFelipe Balbi * the IRQ routine or from ep.queue() to kickstart a request on an 219550a7375SFelipe Balbi * endpoint. 220550a7375SFelipe Balbi * 221550a7375SFelipe Balbi * Context: controller locked, IRQs blocked, endpoint selected 222550a7375SFelipe Balbi */ 223550a7375SFelipe Balbi static void txstate(struct musb *musb, struct musb_request *req) 224550a7375SFelipe Balbi { 225550a7375SFelipe Balbi u8 epnum = req->epnum; 226550a7375SFelipe Balbi struct musb_ep *musb_ep; 227550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 228550a7375SFelipe Balbi struct usb_request *request; 229550a7375SFelipe Balbi u16 fifo_count = 0, csr; 230550a7375SFelipe Balbi int use_dma = 0; 231550a7375SFelipe Balbi 232550a7375SFelipe Balbi musb_ep = req->ep; 233550a7375SFelipe Balbi 234abf710e6SVikram Pandita /* Check if EP is disabled */ 235abf710e6SVikram Pandita if (!musb_ep->desc) { 236b99d3659SBin Liu musb_dbg(musb, "ep:%s disabled - ignore request", 237abf710e6SVikram Pandita musb_ep->end_point.name); 238abf710e6SVikram Pandita return; 239abf710e6SVikram Pandita } 240abf710e6SVikram Pandita 241550a7375SFelipe Balbi /* we shouldn't get here while DMA is active ... but we do ... */ 242550a7375SFelipe Balbi if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 243b99d3659SBin Liu musb_dbg(musb, "dma pending..."); 244550a7375SFelipe Balbi return; 245550a7375SFelipe Balbi } 246550a7375SFelipe Balbi 247550a7375SFelipe Balbi /* read TXCSR before */ 248550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 249550a7375SFelipe Balbi 250550a7375SFelipe Balbi request = &req->request; 251550a7375SFelipe Balbi fifo_count = min(max_ep_writesize(musb, musb_ep), 252550a7375SFelipe Balbi (int)(request->length - request->actual)); 253550a7375SFelipe Balbi 254550a7375SFelipe Balbi if (csr & MUSB_TXCSR_TXPKTRDY) { 255b99d3659SBin Liu musb_dbg(musb, "%s old packet still ready , txcsr %03x", 256550a7375SFelipe Balbi musb_ep->end_point.name, csr); 257550a7375SFelipe Balbi return; 258550a7375SFelipe Balbi } 259550a7375SFelipe Balbi 260550a7375SFelipe Balbi if (csr & MUSB_TXCSR_P_SENDSTALL) { 261b99d3659SBin Liu musb_dbg(musb, "%s stalling, txcsr %03x", 262550a7375SFelipe Balbi musb_ep->end_point.name, csr); 263550a7375SFelipe Balbi return; 264550a7375SFelipe Balbi } 265550a7375SFelipe Balbi 266b99d3659SBin Liu musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x", 267550a7375SFelipe Balbi epnum, musb_ep->packet_sz, fifo_count, 268550a7375SFelipe Balbi csr); 269550a7375SFelipe Balbi 270550a7375SFelipe Balbi #ifndef CONFIG_MUSB_PIO_ONLY 271c65bfa62SMian Yousaf Kaukab if (is_buffer_mapped(req)) { 272550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 27366af83ddSMing Lei size_t request_size; 27466af83ddSMing Lei 27566af83ddSMing Lei /* setup DMA, then program endpoint CSR */ 27666af83ddSMing Lei request_size = min_t(size_t, request->length - request->actual, 27766af83ddSMing Lei musb_ep->dma->max_len); 278550a7375SFelipe Balbi 279d17d535fSAjay Kumar Gupta use_dma = (request->dma != DMA_ADDR_INVALID && request_size); 280550a7375SFelipe Balbi 281550a7375SFelipe Balbi /* MUSB_TXCSR_P_ISO is still set correctly */ 282550a7375SFelipe Balbi 28303840fadSFelipe Balbi if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) { 284d1043a26SAnand Gadiyar if (request_size < musb_ep->packet_sz) 285550a7375SFelipe Balbi musb_ep->dma->desired_mode = 0; 286550a7375SFelipe Balbi else 287550a7375SFelipe Balbi musb_ep->dma->desired_mode = 1; 288550a7375SFelipe Balbi 289550a7375SFelipe Balbi use_dma = use_dma && c->channel_program( 290550a7375SFelipe Balbi musb_ep->dma, musb_ep->packet_sz, 291550a7375SFelipe Balbi musb_ep->dma->desired_mode, 292796a83faSCliff Cai request->dma + request->actual, request_size); 293550a7375SFelipe Balbi if (use_dma) { 294550a7375SFelipe Balbi if (musb_ep->dma->desired_mode == 0) { 295b6e434a5SSergei Shtylyov /* 296b6e434a5SSergei Shtylyov * We must not clear the DMAMODE bit 297b6e434a5SSergei Shtylyov * before the DMAENAB bit -- and the 298b6e434a5SSergei Shtylyov * latter doesn't always get cleared 299b6e434a5SSergei Shtylyov * before we get here... 300b6e434a5SSergei Shtylyov */ 301b6e434a5SSergei Shtylyov csr &= ~(MUSB_TXCSR_AUTOSET 302b6e434a5SSergei Shtylyov | MUSB_TXCSR_DMAENAB); 303b6e434a5SSergei Shtylyov musb_writew(epio, MUSB_TXCSR, csr 304b6e434a5SSergei Shtylyov | MUSB_TXCSR_P_WZC_BITS); 305b6e434a5SSergei Shtylyov csr &= ~MUSB_TXCSR_DMAMODE; 306550a7375SFelipe Balbi csr |= (MUSB_TXCSR_DMAENAB | 307550a7375SFelipe Balbi MUSB_TXCSR_MODE); 308550a7375SFelipe Balbi /* against programming guide */ 309f11d893dSMing Lei } else { 310f11d893dSMing Lei csr |= (MUSB_TXCSR_DMAENAB 311550a7375SFelipe Balbi | MUSB_TXCSR_DMAMODE 312550a7375SFelipe Balbi | MUSB_TXCSR_MODE); 313bb3a2ef2Ssupriya karanth /* 314bb3a2ef2Ssupriya karanth * Enable Autoset according to table 315bb3a2ef2Ssupriya karanth * below 316bb3a2ef2Ssupriya karanth * bulk_split hb_mult Autoset_Enable 317bb3a2ef2Ssupriya karanth * 0 0 Yes(Normal) 318bb3a2ef2Ssupriya karanth * 0 >0 No(High BW ISO) 319bb3a2ef2Ssupriya karanth * 1 0 Yes(HS bulk) 320bb3a2ef2Ssupriya karanth * 1 >0 Yes(FS bulk) 321bb3a2ef2Ssupriya karanth */ 322bb3a2ef2Ssupriya karanth if (!musb_ep->hb_mult || 323bb3a2ef2Ssupriya karanth can_bulk_split(musb, 3241a171626SGeyslan G. Bem musb_ep->type)) 325f11d893dSMing Lei csr |= MUSB_TXCSR_AUTOSET; 326f11d893dSMing Lei } 327550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_P_UNDERRUN; 328f11d893dSMing Lei 329550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 330550a7375SFelipe Balbi } 331550a7375SFelipe Balbi } 332550a7375SFelipe Balbi 333f8e9f34fSTony Lindgren if (is_cppi_enabled(musb)) { 334550a7375SFelipe Balbi /* program endpoint CSR first, then setup DMA */ 335b6e434a5SSergei Shtylyov csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 33637e3ee99SSergei Shtylyov csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | 33737e3ee99SSergei Shtylyov MUSB_TXCSR_MODE; 338fc525751SSebastian Andrzej Siewior musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS & 339fc525751SSebastian Andrzej Siewior ~MUSB_TXCSR_P_UNDERRUN) | csr); 340550a7375SFelipe Balbi 341550a7375SFelipe Balbi /* ensure writebuffer is empty */ 342550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 343550a7375SFelipe Balbi 344fc525751SSebastian Andrzej Siewior /* 345fc525751SSebastian Andrzej Siewior * NOTE host side sets DMAENAB later than this; both are 346fc525751SSebastian Andrzej Siewior * OK since the transfer dma glue (between CPPI and 347fc525751SSebastian Andrzej Siewior * Mentor fifos) just tells CPPI it could start. Data 348fc525751SSebastian Andrzej Siewior * only moves to the USB TX fifo when both fifos are 349fc525751SSebastian Andrzej Siewior * ready. 350550a7375SFelipe Balbi */ 351fc525751SSebastian Andrzej Siewior /* 352fc525751SSebastian Andrzej Siewior * "mode" is irrelevant here; handle terminating ZLPs 353fc525751SSebastian Andrzej Siewior * like PIO does, since the hardware RNDIS mode seems 354fc525751SSebastian Andrzej Siewior * unreliable except for the 355fc525751SSebastian Andrzej Siewior * last-packet-is-already-short case. 356550a7375SFelipe Balbi */ 357550a7375SFelipe Balbi use_dma = use_dma && c->channel_program( 358550a7375SFelipe Balbi musb_ep->dma, musb_ep->packet_sz, 359550a7375SFelipe Balbi 0, 36066af83ddSMing Lei request->dma + request->actual, 36166af83ddSMing Lei request_size); 362550a7375SFelipe Balbi if (!use_dma) { 363550a7375SFelipe Balbi c->channel_release(musb_ep->dma); 364550a7375SFelipe Balbi musb_ep->dma = NULL; 365b6e434a5SSergei Shtylyov csr &= ~MUSB_TXCSR_DMAENAB; 366b6e434a5SSergei Shtylyov musb_writew(epio, MUSB_TXCSR, csr); 367550a7375SFelipe Balbi /* invariant: prequest->buf is non-null */ 368550a7375SFelipe Balbi } 369f8e9f34fSTony Lindgren } else if (tusb_dma_omap(musb)) 370550a7375SFelipe Balbi use_dma = use_dma && c->channel_program( 371550a7375SFelipe Balbi musb_ep->dma, musb_ep->packet_sz, 372550a7375SFelipe Balbi request->zero, 37366af83ddSMing Lei request->dma + request->actual, 37466af83ddSMing Lei request_size); 375550a7375SFelipe Balbi } 376550a7375SFelipe Balbi #endif 377550a7375SFelipe Balbi 378550a7375SFelipe Balbi if (!use_dma) { 37992d2711fSHema Kalliguddi /* 38092d2711fSHema Kalliguddi * Unmap the dma buffer back to cpu if dma channel 38192d2711fSHema Kalliguddi * programming fails 38292d2711fSHema Kalliguddi */ 38392d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 38492d2711fSHema Kalliguddi 385550a7375SFelipe Balbi musb_write_fifo(musb_ep->hw_ep, fifo_count, 386550a7375SFelipe Balbi (u8 *) (request->buf + request->actual)); 387550a7375SFelipe Balbi request->actual += fifo_count; 388550a7375SFelipe Balbi csr |= MUSB_TXCSR_TXPKTRDY; 389550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_P_UNDERRUN; 390550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 391550a7375SFelipe Balbi } 392550a7375SFelipe Balbi 393550a7375SFelipe Balbi /* host may already have the data when this message shows... */ 394b99d3659SBin Liu musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d", 395550a7375SFelipe Balbi musb_ep->end_point.name, use_dma ? "dma" : "pio", 396550a7375SFelipe Balbi request->actual, request->length, 397550a7375SFelipe Balbi musb_readw(epio, MUSB_TXCSR), 398550a7375SFelipe Balbi fifo_count, 399550a7375SFelipe Balbi musb_readw(epio, MUSB_TXMAXP)); 400550a7375SFelipe Balbi } 401550a7375SFelipe Balbi 402550a7375SFelipe Balbi /* 403550a7375SFelipe Balbi * FIFO state update (e.g. data ready). 404550a7375SFelipe Balbi * Called from IRQ, with controller locked. 405550a7375SFelipe Balbi */ 406550a7375SFelipe Balbi void musb_g_tx(struct musb *musb, u8 epnum) 407550a7375SFelipe Balbi { 408550a7375SFelipe Balbi u16 csr; 409ad1adb89SFelipe Balbi struct musb_request *req; 410550a7375SFelipe Balbi struct usb_request *request; 411550a7375SFelipe Balbi u8 __iomem *mbase = musb->mregs; 412550a7375SFelipe Balbi struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; 413550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 414550a7375SFelipe Balbi struct dma_channel *dma; 415550a7375SFelipe Balbi 416550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 417ad1adb89SFelipe Balbi req = next_request(musb_ep); 418ad1adb89SFelipe Balbi request = &req->request; 419550a7375SFelipe Balbi 420550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 421b99d3659SBin Liu musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr); 422550a7375SFelipe Balbi 423550a7375SFelipe Balbi dma = is_dma_capable() ? musb_ep->dma : NULL; 4247723de7eSSergei Shtylyov 4257723de7eSSergei Shtylyov /* 4267723de7eSSergei Shtylyov * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX 4277723de7eSSergei Shtylyov * probably rates reporting as a host error. 428550a7375SFelipe Balbi */ 429550a7375SFelipe Balbi if (csr & MUSB_TXCSR_P_SENTSTALL) { 430550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS; 431550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_P_SENTSTALL; 432550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 4337723de7eSSergei Shtylyov return; 434550a7375SFelipe Balbi } 435550a7375SFelipe Balbi 436550a7375SFelipe Balbi if (csr & MUSB_TXCSR_P_UNDERRUN) { 4377723de7eSSergei Shtylyov /* We NAKed, no big deal... little reason to care. */ 438550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS; 4397723de7eSSergei Shtylyov csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 440550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 4415c8a86e1SFelipe Balbi dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", 4425c8a86e1SFelipe Balbi epnum, request); 443550a7375SFelipe Balbi } 444550a7375SFelipe Balbi 445550a7375SFelipe Balbi if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 4467723de7eSSergei Shtylyov /* 4477723de7eSSergei Shtylyov * SHOULD NOT HAPPEN... has with CPPI though, after 448550a7375SFelipe Balbi * changing SENDSTALL (and other cases); harmless? 449550a7375SFelipe Balbi */ 450b99d3659SBin Liu musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name); 4517723de7eSSergei Shtylyov return; 452550a7375SFelipe Balbi } 453550a7375SFelipe Balbi 454*ae2938c3SPaul Cercueil if (req) { 455550a7375SFelipe Balbi 4569aea9b6cSBin Liu trace_musb_req_tx(req); 4579aea9b6cSBin Liu 458550a7375SFelipe Balbi if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 459550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS; 4607723de7eSSergei Shtylyov csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 461100d4a9dSMian Yousaf Kaukab MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); 462550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 4637723de7eSSergei Shtylyov /* Ensure writebuffer is empty. */ 464550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 465550a7375SFelipe Balbi request->actual += musb_ep->dma->actual_len; 466b99d3659SBin Liu musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p", 4677723de7eSSergei Shtylyov epnum, csr, musb_ep->dma->actual_len, request); 468550a7375SFelipe Balbi } 469550a7375SFelipe Balbi 4707723de7eSSergei Shtylyov /* 4717723de7eSSergei Shtylyov * First, maybe a terminating short packet. Some DMA 4727723de7eSSergei Shtylyov * engines might handle this by themselves. 473550a7375SFelipe Balbi */ 474fb91cddcSTony Lindgren if ((request->zero && request->length) 475e7379aaaSMing Lei && (request->length % musb_ep->packet_sz == 0) 476c418fd6cSPaul Elder && (request->actual == request->length)) { 477fb91cddcSTony Lindgren 4787723de7eSSergei Shtylyov /* 4797723de7eSSergei Shtylyov * On DMA completion, FIFO may not be 4807723de7eSSergei Shtylyov * available yet... 481550a7375SFelipe Balbi */ 482550a7375SFelipe Balbi if (csr & MUSB_TXCSR_TXPKTRDY) 4837723de7eSSergei Shtylyov return; 484550a7375SFelipe Balbi 4857723de7eSSergei Shtylyov musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE 486550a7375SFelipe Balbi | MUSB_TXCSR_TXPKTRDY); 487550a7375SFelipe Balbi request->zero = 0; 488550a7375SFelipe Balbi } 489550a7375SFelipe Balbi 490bb27bc2cSMing Lei if (request->actual == request->length) { 491550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 49239287076SSupriya Karanth /* 49339287076SSupriya Karanth * In the giveback function the MUSB lock is 49439287076SSupriya Karanth * released and acquired after sometime. During 49539287076SSupriya Karanth * this time period the INDEX register could get 49639287076SSupriya Karanth * changed by the gadget_queue function especially 49739287076SSupriya Karanth * on SMP systems. Reselect the INDEX to be sure 49839287076SSupriya Karanth * we are reading/modifying the right registers 49939287076SSupriya Karanth */ 50039287076SSupriya Karanth musb_ep_select(mbase, epnum); 501ad1adb89SFelipe Balbi req = musb_ep->desc ? next_request(musb_ep) : NULL; 502ad1adb89SFelipe Balbi if (!req) { 503b99d3659SBin Liu musb_dbg(musb, "%s idle now", 504550a7375SFelipe Balbi musb_ep->end_point.name); 5057723de7eSSergei Shtylyov return; 50695962a77SSergei Shtylyov } 507550a7375SFelipe Balbi } 508550a7375SFelipe Balbi 509ad1adb89SFelipe Balbi txstate(musb, req); 510550a7375SFelipe Balbi } 511550a7375SFelipe Balbi } 512550a7375SFelipe Balbi 513550a7375SFelipe Balbi /* ------------------------------------------------------------ */ 514550a7375SFelipe Balbi 515550a7375SFelipe Balbi /* 516550a7375SFelipe Balbi * Context: controller locked, IRQs blocked, endpoint selected 517550a7375SFelipe Balbi */ 518550a7375SFelipe Balbi static void rxstate(struct musb *musb, struct musb_request *req) 519550a7375SFelipe Balbi { 520550a7375SFelipe Balbi const u8 epnum = req->epnum; 521550a7375SFelipe Balbi struct usb_request *request = &req->request; 522bd2e74d6SMing Lei struct musb_ep *musb_ep; 523550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 524f0443afdSSergei Shtylyov unsigned len = 0; 525f0443afdSSergei Shtylyov u16 fifo_count; 526cea83241SSergei Shtylyov u16 csr = musb_readw(epio, MUSB_RXCSR); 527bd2e74d6SMing Lei struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 5280ae52d54SAnand Gadiyar u8 use_mode_1; 529bd2e74d6SMing Lei 530bd2e74d6SMing Lei if (hw_ep->is_shared_fifo) 531bd2e74d6SMing Lei musb_ep = &hw_ep->ep_in; 532bd2e74d6SMing Lei else 533bd2e74d6SMing Lei musb_ep = &hw_ep->ep_out; 534bd2e74d6SMing Lei 535f0443afdSSergei Shtylyov fifo_count = musb_ep->packet_sz; 536550a7375SFelipe Balbi 537abf710e6SVikram Pandita /* Check if EP is disabled */ 538abf710e6SVikram Pandita if (!musb_ep->desc) { 539b99d3659SBin Liu musb_dbg(musb, "ep:%s disabled - ignore request", 540abf710e6SVikram Pandita musb_ep->end_point.name); 541abf710e6SVikram Pandita return; 542abf710e6SVikram Pandita } 543abf710e6SVikram Pandita 544cea83241SSergei Shtylyov /* We shouldn't get here while DMA is active, but we do... */ 545cea83241SSergei Shtylyov if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 546b99d3659SBin Liu musb_dbg(musb, "DMA pending..."); 547cea83241SSergei Shtylyov return; 548cea83241SSergei Shtylyov } 549cea83241SSergei Shtylyov 550cea83241SSergei Shtylyov if (csr & MUSB_RXCSR_P_SENDSTALL) { 551b99d3659SBin Liu musb_dbg(musb, "%s stalling, RXCSR %04x", 552cea83241SSergei Shtylyov musb_ep->end_point.name, csr); 553cea83241SSergei Shtylyov return; 554cea83241SSergei Shtylyov } 555550a7375SFelipe Balbi 556f8e9f34fSTony Lindgren if (is_cppi_enabled(musb) && is_buffer_mapped(req)) { 557550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 558550a7375SFelipe Balbi struct dma_channel *channel = musb_ep->dma; 559550a7375SFelipe Balbi 560550a7375SFelipe Balbi /* NOTE: CPPI won't actually stop advancing the DMA 561550a7375SFelipe Balbi * queue after short packet transfers, so this is almost 562550a7375SFelipe Balbi * always going to run as IRQ-per-packet DMA so that 563550a7375SFelipe Balbi * faults will be handled correctly. 564550a7375SFelipe Balbi */ 565550a7375SFelipe Balbi if (c->channel_program(channel, 566550a7375SFelipe Balbi musb_ep->packet_sz, 567550a7375SFelipe Balbi !request->short_not_ok, 568550a7375SFelipe Balbi request->dma + request->actual, 569550a7375SFelipe Balbi request->length - request->actual)) { 570550a7375SFelipe Balbi 571550a7375SFelipe Balbi /* make sure that if an rxpkt arrived after the irq, 572550a7375SFelipe Balbi * the cppi engine will be ready to take it as soon 573550a7375SFelipe Balbi * as DMA is enabled 574550a7375SFelipe Balbi */ 575550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_AUTOCLEAR 576550a7375SFelipe Balbi | MUSB_RXCSR_DMAMODE); 577550a7375SFelipe Balbi csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; 578550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 579550a7375SFelipe Balbi return; 580550a7375SFelipe Balbi } 581550a7375SFelipe Balbi } 582550a7375SFelipe Balbi 583550a7375SFelipe Balbi if (csr & MUSB_RXCSR_RXPKTRDY) { 584f0443afdSSergei Shtylyov fifo_count = musb_readw(epio, MUSB_RXCOUNT); 5850ae52d54SAnand Gadiyar 5860ae52d54SAnand Gadiyar /* 58700a89180SFelipe Balbi * Enable Mode 1 on RX transfers only when short_not_ok flag 58800a89180SFelipe Balbi * is set. Currently short_not_ok flag is set only from 58900a89180SFelipe Balbi * file_storage and f_mass_storage drivers 5900ae52d54SAnand Gadiyar */ 59100a89180SFelipe Balbi 59200a89180SFelipe Balbi if (request->short_not_ok && fifo_count == musb_ep->packet_sz) 5930ae52d54SAnand Gadiyar use_mode_1 = 1; 5940ae52d54SAnand Gadiyar else 5950ae52d54SAnand Gadiyar use_mode_1 = 0; 5960ae52d54SAnand Gadiyar 597550a7375SFelipe Balbi if (request->actual < request->length) { 59803840fadSFelipe Balbi if (!is_buffer_mapped(req)) 59903840fadSFelipe Balbi goto buffer_aint_mapped; 60003840fadSFelipe Balbi 60103840fadSFelipe Balbi if (musb_dma_inventra(musb)) { 602550a7375SFelipe Balbi struct dma_controller *c; 603550a7375SFelipe Balbi struct dma_channel *channel; 604550a7375SFelipe Balbi int use_dma = 0; 60537730eccSFelipe Balbi unsigned int transfer_size; 606550a7375SFelipe Balbi 607550a7375SFelipe Balbi c = musb->dma_controller; 608550a7375SFelipe Balbi channel = musb_ep->dma; 609550a7375SFelipe Balbi 61000a89180SFelipe Balbi /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in 61100a89180SFelipe Balbi * mode 0 only. So we do not get endpoint interrupts due to DMA 61200a89180SFelipe Balbi * completion. We only get interrupts from DMA controller. 61300a89180SFelipe Balbi * 61400a89180SFelipe Balbi * We could operate in DMA mode 1 if we knew the size of the tranfer 61500a89180SFelipe Balbi * in advance. For mass storage class, request->length = what the host 61600a89180SFelipe Balbi * sends, so that'd work. But for pretty much everything else, 61700a89180SFelipe Balbi * request->length is routinely more than what the host sends. For 61800a89180SFelipe Balbi * most these gadgets, end of is signified either by a short packet, 61900a89180SFelipe Balbi * or filling the last byte of the buffer. (Sending extra data in 62000a89180SFelipe Balbi * that last pckate should trigger an overflow fault.) But in mode 1, 62100a89180SFelipe Balbi * we don't get DMA completion interrupt for short packets. 62200a89180SFelipe Balbi * 62300a89180SFelipe Balbi * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), 62400a89180SFelipe Balbi * to get endpoint interrupt on every DMA req, but that didn't seem 62500a89180SFelipe Balbi * to work reliably. 62600a89180SFelipe Balbi * 62700a89180SFelipe Balbi * REVISIT an updated g_file_storage can set req->short_not_ok, which 62800a89180SFelipe Balbi * then becomes usable as a runtime "use mode 1" hint... 62900a89180SFelipe Balbi */ 63000a89180SFelipe Balbi 6310ae52d54SAnand Gadiyar /* Experimental: Mode1 works with mass storage use cases */ 6320ae52d54SAnand Gadiyar if (use_mode_1) { 6339001d80dSMing Lei csr |= MUSB_RXCSR_AUTOCLEAR; 6340ae52d54SAnand Gadiyar musb_writew(epio, MUSB_RXCSR, csr); 6350ae52d54SAnand Gadiyar csr |= MUSB_RXCSR_DMAENAB; 6360ae52d54SAnand Gadiyar musb_writew(epio, MUSB_RXCSR, csr); 637550a7375SFelipe Balbi 6380ae52d54SAnand Gadiyar /* 6390ae52d54SAnand Gadiyar * this special sequence (enabling and then 640550a7375SFelipe Balbi * disabling MUSB_RXCSR_DMAMODE) is required 641550a7375SFelipe Balbi * to get DMAReq to activate 642550a7375SFelipe Balbi */ 643550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 644550a7375SFelipe Balbi csr | MUSB_RXCSR_DMAMODE); 6450ae52d54SAnand Gadiyar musb_writew(epio, MUSB_RXCSR, csr); 6460ae52d54SAnand Gadiyar 64737730eccSFelipe Balbi transfer_size = min_t(unsigned int, 64837730eccSFelipe Balbi request->length - 64937730eccSFelipe Balbi request->actual, 650660fa886SRoger Quadros channel->max_len); 651660fa886SRoger Quadros musb_ep->dma->desired_mode = 1; 6520ae52d54SAnand Gadiyar } else { 6539001d80dSMing Lei if (!musb_ep->hb_mult && 6549001d80dSMing Lei musb_ep->hw_ep->rx_double_buffered) 6559001d80dSMing Lei csr |= MUSB_RXCSR_AUTOCLEAR; 6560ae52d54SAnand Gadiyar csr |= MUSB_RXCSR_DMAENAB; 657550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 658550a7375SFelipe Balbi 6591018b4e4SMing Lei transfer_size = min(request->length - request->actual, 660f0443afdSSergei Shtylyov (unsigned)fifo_count); 661550a7375SFelipe Balbi musb_ep->dma->desired_mode = 0; 6620ae52d54SAnand Gadiyar } 663550a7375SFelipe Balbi 664550a7375SFelipe Balbi use_dma = c->channel_program( 665550a7375SFelipe Balbi channel, 666550a7375SFelipe Balbi musb_ep->packet_sz, 667550a7375SFelipe Balbi channel->desired_mode, 668550a7375SFelipe Balbi request->dma 669550a7375SFelipe Balbi + request->actual, 670550a7375SFelipe Balbi transfer_size); 671550a7375SFelipe Balbi 672550a7375SFelipe Balbi if (use_dma) 673550a7375SFelipe Balbi return; 674550a7375SFelipe Balbi } 67503840fadSFelipe Balbi 67603840fadSFelipe Balbi if ((musb_dma_ux500(musb)) && 677a48ff906SMian Yousaf Kaukab (request->actual < request->length)) { 678a48ff906SMian Yousaf Kaukab 679a48ff906SMian Yousaf Kaukab struct dma_controller *c; 680a48ff906SMian Yousaf Kaukab struct dma_channel *channel; 68137730eccSFelipe Balbi unsigned int transfer_size = 0; 682a48ff906SMian Yousaf Kaukab 683a48ff906SMian Yousaf Kaukab c = musb->dma_controller; 684a48ff906SMian Yousaf Kaukab channel = musb_ep->dma; 685a48ff906SMian Yousaf Kaukab 686a48ff906SMian Yousaf Kaukab /* In case first packet is short */ 687f0443afdSSergei Shtylyov if (fifo_count < musb_ep->packet_sz) 688f0443afdSSergei Shtylyov transfer_size = fifo_count; 689a48ff906SMian Yousaf Kaukab else if (request->short_not_ok) 69037730eccSFelipe Balbi transfer_size = min_t(unsigned int, 69137730eccSFelipe Balbi request->length - 692a48ff906SMian Yousaf Kaukab request->actual, 693a48ff906SMian Yousaf Kaukab channel->max_len); 694a48ff906SMian Yousaf Kaukab else 69537730eccSFelipe Balbi transfer_size = min_t(unsigned int, 69637730eccSFelipe Balbi request->length - 697a48ff906SMian Yousaf Kaukab request->actual, 698f0443afdSSergei Shtylyov (unsigned)fifo_count); 699a48ff906SMian Yousaf Kaukab 700a48ff906SMian Yousaf Kaukab csr &= ~MUSB_RXCSR_DMAMODE; 701a48ff906SMian Yousaf Kaukab csr |= (MUSB_RXCSR_DMAENAB | 702a48ff906SMian Yousaf Kaukab MUSB_RXCSR_AUTOCLEAR); 703a48ff906SMian Yousaf Kaukab 704a48ff906SMian Yousaf Kaukab musb_writew(epio, MUSB_RXCSR, csr); 705a48ff906SMian Yousaf Kaukab 706a48ff906SMian Yousaf Kaukab if (transfer_size <= musb_ep->packet_sz) { 707a48ff906SMian Yousaf Kaukab musb_ep->dma->desired_mode = 0; 708a48ff906SMian Yousaf Kaukab } else { 709a48ff906SMian Yousaf Kaukab musb_ep->dma->desired_mode = 1; 710a48ff906SMian Yousaf Kaukab /* Mode must be set after DMAENAB */ 711a48ff906SMian Yousaf Kaukab csr |= MUSB_RXCSR_DMAMODE; 712a48ff906SMian Yousaf Kaukab musb_writew(epio, MUSB_RXCSR, csr); 713a48ff906SMian Yousaf Kaukab } 714a48ff906SMian Yousaf Kaukab 715a48ff906SMian Yousaf Kaukab if (c->channel_program(channel, 716a48ff906SMian Yousaf Kaukab musb_ep->packet_sz, 717a48ff906SMian Yousaf Kaukab channel->desired_mode, 718a48ff906SMian Yousaf Kaukab request->dma 719a48ff906SMian Yousaf Kaukab + request->actual, 720a48ff906SMian Yousaf Kaukab transfer_size)) 721a48ff906SMian Yousaf Kaukab 722a48ff906SMian Yousaf Kaukab return; 723a48ff906SMian Yousaf Kaukab } 724550a7375SFelipe Balbi 725f0443afdSSergei Shtylyov len = request->length - request->actual; 726b99d3659SBin Liu musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d", 727550a7375SFelipe Balbi musb_ep->end_point.name, 728f0443afdSSergei Shtylyov fifo_count, len, 729550a7375SFelipe Balbi musb_ep->packet_sz); 730550a7375SFelipe Balbi 731c2c96321SFelipe Balbi fifo_count = min_t(unsigned, len, fifo_count); 732550a7375SFelipe Balbi 73303840fadSFelipe Balbi if (tusb_dma_omap(musb)) { 734550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 735550a7375SFelipe Balbi struct dma_channel *channel = musb_ep->dma; 736550a7375SFelipe Balbi u32 dma_addr = request->dma + request->actual; 737550a7375SFelipe Balbi int ret; 738550a7375SFelipe Balbi 739550a7375SFelipe Balbi ret = c->channel_program(channel, 740550a7375SFelipe Balbi musb_ep->packet_sz, 741550a7375SFelipe Balbi channel->desired_mode, 742550a7375SFelipe Balbi dma_addr, 743550a7375SFelipe Balbi fifo_count); 744550a7375SFelipe Balbi if (ret) 745550a7375SFelipe Balbi return; 746550a7375SFelipe Balbi } 74703840fadSFelipe Balbi 74892d2711fSHema Kalliguddi /* 74992d2711fSHema Kalliguddi * Unmap the dma buffer back to cpu if dma channel 75092d2711fSHema Kalliguddi * programming fails. This buffer is mapped if the 75192d2711fSHema Kalliguddi * channel allocation is successful 75292d2711fSHema Kalliguddi */ 75392d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 75492d2711fSHema Kalliguddi 755e75df371SMing Lei /* 756e75df371SMing Lei * Clear DMAENAB and AUTOCLEAR for the 75792d2711fSHema Kalliguddi * PIO mode transfer 75892d2711fSHema Kalliguddi */ 759e75df371SMing Lei csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); 76092d2711fSHema Kalliguddi musb_writew(epio, MUSB_RXCSR, csr); 761550a7375SFelipe Balbi 76203840fadSFelipe Balbi buffer_aint_mapped: 763550a7375SFelipe Balbi musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) 764550a7375SFelipe Balbi (request->buf + request->actual)); 765550a7375SFelipe Balbi request->actual += fifo_count; 766550a7375SFelipe Balbi 767550a7375SFelipe Balbi /* REVISIT if we left anything in the fifo, flush 768550a7375SFelipe Balbi * it and report -EOVERFLOW 769550a7375SFelipe Balbi */ 770550a7375SFelipe Balbi 771550a7375SFelipe Balbi /* ack the read! */ 772550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS; 773550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_RXPKTRDY; 774550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 775550a7375SFelipe Balbi } 776550a7375SFelipe Balbi } 777550a7375SFelipe Balbi 778550a7375SFelipe Balbi /* reach the end or short packet detected */ 779f0443afdSSergei Shtylyov if (request->actual == request->length || 780f0443afdSSergei Shtylyov fifo_count < musb_ep->packet_sz) 781550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 782550a7375SFelipe Balbi } 783550a7375SFelipe Balbi 784550a7375SFelipe Balbi /* 785550a7375SFelipe Balbi * Data ready for a request; called from IRQ 786550a7375SFelipe Balbi */ 787550a7375SFelipe Balbi void musb_g_rx(struct musb *musb, u8 epnum) 788550a7375SFelipe Balbi { 789550a7375SFelipe Balbi u16 csr; 790ad1adb89SFelipe Balbi struct musb_request *req; 791550a7375SFelipe Balbi struct usb_request *request; 792550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 793bd2e74d6SMing Lei struct musb_ep *musb_ep; 794550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 795550a7375SFelipe Balbi struct dma_channel *dma; 796bd2e74d6SMing Lei struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 797bd2e74d6SMing Lei 798bd2e74d6SMing Lei if (hw_ep->is_shared_fifo) 799bd2e74d6SMing Lei musb_ep = &hw_ep->ep_in; 800bd2e74d6SMing Lei else 801bd2e74d6SMing Lei musb_ep = &hw_ep->ep_out; 802550a7375SFelipe Balbi 803550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 804550a7375SFelipe Balbi 805ad1adb89SFelipe Balbi req = next_request(musb_ep); 806ad1adb89SFelipe Balbi if (!req) 8070abdc36fSMaulik Mankad return; 808550a7375SFelipe Balbi 809fc78003eSBin Liu trace_musb_req_rx(req); 810ad1adb89SFelipe Balbi request = &req->request; 811ad1adb89SFelipe Balbi 812550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 813550a7375SFelipe Balbi dma = is_dma_capable() ? musb_ep->dma : NULL; 814550a7375SFelipe Balbi 815b99d3659SBin Liu musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name, 816550a7375SFelipe Balbi csr, dma ? " (dma)" : "", request); 817550a7375SFelipe Balbi 818550a7375SFelipe Balbi if (csr & MUSB_RXCSR_P_SENTSTALL) { 819550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS; 820550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_P_SENTSTALL; 821550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 822cea83241SSergei Shtylyov return; 823550a7375SFelipe Balbi } 824550a7375SFelipe Balbi 825550a7375SFelipe Balbi if (csr & MUSB_RXCSR_P_OVERRUN) { 826550a7375SFelipe Balbi /* csr |= MUSB_RXCSR_P_WZC_BITS; */ 827550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_P_OVERRUN; 828550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 829550a7375SFelipe Balbi 830b99d3659SBin Liu musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request); 83143467868SSergei Shtylyov if (request->status == -EINPROGRESS) 832550a7375SFelipe Balbi request->status = -EOVERFLOW; 833550a7375SFelipe Balbi } 834550a7375SFelipe Balbi if (csr & MUSB_RXCSR_INCOMPRX) { 835550a7375SFelipe Balbi /* REVISIT not necessarily an error */ 836b99d3659SBin Liu musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name); 837550a7375SFelipe Balbi } 838550a7375SFelipe Balbi 839550a7375SFelipe Balbi if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 840550a7375SFelipe Balbi /* "should not happen"; likely RXPKTRDY pending for DMA */ 841b99d3659SBin Liu musb_dbg(musb, "%s busy, csr %04x", 842550a7375SFelipe Balbi musb_ep->end_point.name, csr); 843cea83241SSergei Shtylyov return; 844550a7375SFelipe Balbi } 845550a7375SFelipe Balbi 846550a7375SFelipe Balbi if (dma && (csr & MUSB_RXCSR_DMAENAB)) { 847550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_AUTOCLEAR 848550a7375SFelipe Balbi | MUSB_RXCSR_DMAENAB 849550a7375SFelipe Balbi | MUSB_RXCSR_DMAMODE); 850550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 851550a7375SFelipe Balbi MUSB_RXCSR_P_WZC_BITS | csr); 852550a7375SFelipe Balbi 853550a7375SFelipe Balbi request->actual += musb_ep->dma->actual_len; 854550a7375SFelipe Balbi 855a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 856a48ff906SMian Yousaf Kaukab defined(CONFIG_USB_UX500_DMA) 857550a7375SFelipe Balbi /* Autoclear doesn't clear RxPktRdy for short packets */ 8589001d80dSMing Lei if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) 859550a7375SFelipe Balbi || (dma->actual_len 860550a7375SFelipe Balbi & (musb_ep->packet_sz - 1))) { 861550a7375SFelipe Balbi /* ack the read! */ 862550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_RXPKTRDY; 863550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 864550a7375SFelipe Balbi } 865550a7375SFelipe Balbi 866550a7375SFelipe Balbi /* incomplete, and not short? wait for next IN packet */ 867550a7375SFelipe Balbi if ((request->actual < request->length) 868550a7375SFelipe Balbi && (musb_ep->dma->actual_len 8699001d80dSMing Lei == musb_ep->packet_sz)) { 8709001d80dSMing Lei /* In double buffer case, continue to unload fifo if 8719001d80dSMing Lei * there is Rx packet in FIFO. 8729001d80dSMing Lei **/ 8739001d80dSMing Lei csr = musb_readw(epio, MUSB_RXCSR); 8749001d80dSMing Lei if ((csr & MUSB_RXCSR_RXPKTRDY) && 8759001d80dSMing Lei hw_ep->rx_double_buffered) 8769001d80dSMing Lei goto exit; 877cea83241SSergei Shtylyov return; 8789001d80dSMing Lei } 879550a7375SFelipe Balbi #endif 880550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 88139287076SSupriya Karanth /* 88239287076SSupriya Karanth * In the giveback function the MUSB lock is 88339287076SSupriya Karanth * released and acquired after sometime. During 88439287076SSupriya Karanth * this time period the INDEX register could get 88539287076SSupriya Karanth * changed by the gadget_queue function especially 88639287076SSupriya Karanth * on SMP systems. Reselect the INDEX to be sure 88739287076SSupriya Karanth * we are reading/modifying the right registers 88839287076SSupriya Karanth */ 88939287076SSupriya Karanth musb_ep_select(mbase, epnum); 890550a7375SFelipe Balbi 891ad1adb89SFelipe Balbi req = next_request(musb_ep); 892ad1adb89SFelipe Balbi if (!req) 893cea83241SSergei Shtylyov return; 894550a7375SFelipe Balbi } 895a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 896a48ff906SMian Yousaf Kaukab defined(CONFIG_USB_UX500_DMA) 8979001d80dSMing Lei exit: 898bb324b08SAjay Kumar Gupta #endif 89943467868SSergei Shtylyov /* Analyze request */ 900ad1adb89SFelipe Balbi rxstate(musb, req); 901550a7375SFelipe Balbi } 902550a7375SFelipe Balbi 903550a7375SFelipe Balbi /* ------------------------------------------------------------ */ 904550a7375SFelipe Balbi 905550a7375SFelipe Balbi static int musb_gadget_enable(struct usb_ep *ep, 906550a7375SFelipe Balbi const struct usb_endpoint_descriptor *desc) 907550a7375SFelipe Balbi { 908550a7375SFelipe Balbi unsigned long flags; 909550a7375SFelipe Balbi struct musb_ep *musb_ep; 910550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 911550a7375SFelipe Balbi void __iomem *regs; 912550a7375SFelipe Balbi struct musb *musb; 913550a7375SFelipe Balbi void __iomem *mbase; 914550a7375SFelipe Balbi u8 epnum; 915550a7375SFelipe Balbi u16 csr; 916550a7375SFelipe Balbi unsigned tmp; 917550a7375SFelipe Balbi int status = -EINVAL; 918550a7375SFelipe Balbi 919550a7375SFelipe Balbi if (!ep || !desc) 920550a7375SFelipe Balbi return -EINVAL; 921550a7375SFelipe Balbi 922550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 923550a7375SFelipe Balbi hw_ep = musb_ep->hw_ep; 924550a7375SFelipe Balbi regs = hw_ep->regs; 925550a7375SFelipe Balbi musb = musb_ep->musb; 926550a7375SFelipe Balbi mbase = musb->mregs; 927550a7375SFelipe Balbi epnum = musb_ep->current_epnum; 928550a7375SFelipe Balbi 929550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 930550a7375SFelipe Balbi 931550a7375SFelipe Balbi if (musb_ep->desc) { 932550a7375SFelipe Balbi status = -EBUSY; 933550a7375SFelipe Balbi goto fail; 934550a7375SFelipe Balbi } 93596bcd090SJulia Lawall musb_ep->type = usb_endpoint_type(desc); 936550a7375SFelipe Balbi 937550a7375SFelipe Balbi /* check direction and (later) maxpacket size against endpoint */ 93896bcd090SJulia Lawall if (usb_endpoint_num(desc) != epnum) 939550a7375SFelipe Balbi goto fail; 940550a7375SFelipe Balbi 941550a7375SFelipe Balbi /* REVISIT this rules out high bandwidth periodic transfers */ 9426ddcabc2SFelipe Balbi tmp = usb_endpoint_maxp_mult(desc) - 1; 9436ddcabc2SFelipe Balbi if (tmp) { 944f11d893dSMing Lei int ok; 945f11d893dSMing Lei 946f11d893dSMing Lei if (usb_endpoint_dir_in(desc)) 947f11d893dSMing Lei ok = musb->hb_iso_tx; 948f11d893dSMing Lei else 949f11d893dSMing Lei ok = musb->hb_iso_rx; 950f11d893dSMing Lei 951f11d893dSMing Lei if (!ok) { 952b99d3659SBin Liu musb_dbg(musb, "no support for high bandwidth ISO"); 953550a7375SFelipe Balbi goto fail; 954f11d893dSMing Lei } 9556ddcabc2SFelipe Balbi musb_ep->hb_mult = tmp; 956f11d893dSMing Lei } else { 957f11d893dSMing Lei musb_ep->hb_mult = 0; 958f11d893dSMing Lei } 959f11d893dSMing Lei 9606ddcabc2SFelipe Balbi musb_ep->packet_sz = usb_endpoint_maxp(desc); 961f11d893dSMing Lei tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); 962550a7375SFelipe Balbi 963550a7375SFelipe Balbi /* enable the interrupts for the endpoint, set the endpoint 964550a7375SFelipe Balbi * packet size (or fail), set the mode, clear the fifo 965550a7375SFelipe Balbi */ 966550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 96796bcd090SJulia Lawall if (usb_endpoint_dir_in(desc)) { 968550a7375SFelipe Balbi 969550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) 970550a7375SFelipe Balbi musb_ep->is_in = 1; 971550a7375SFelipe Balbi if (!musb_ep->is_in) 972550a7375SFelipe Balbi goto fail; 973f11d893dSMing Lei 974f11d893dSMing Lei if (tmp > hw_ep->max_packet_sz_tx) { 975b99d3659SBin Liu musb_dbg(musb, "packet size beyond hardware FIFO size"); 976550a7375SFelipe Balbi goto fail; 977f11d893dSMing Lei } 978550a7375SFelipe Balbi 979b18d26f6SSebastian Andrzej Siewior musb->intrtxe |= (1 << epnum); 980b18d26f6SSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); 981550a7375SFelipe Balbi 982550a7375SFelipe Balbi /* REVISIT if can_bulk_split(), use by updating "tmp"; 983550a7375SFelipe Balbi * likewise high bandwidth periodic tx 984550a7375SFelipe Balbi */ 9859f445cb2SCliff Cai /* Set TXMAXP with the FIFO size of the endpoint 98631c9909bSMing Lei * to disable double buffering mode. 9879f445cb2SCliff Cai */ 988bb3a2ef2Ssupriya karanth if (can_bulk_split(musb, musb_ep->type)) 989bb3a2ef2Ssupriya karanth musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / 990bb3a2ef2Ssupriya karanth musb_ep->packet_sz) - 1; 99106624818SFelipe Balbi musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz 99206624818SFelipe Balbi | (musb_ep->hb_mult << 11)); 993550a7375SFelipe Balbi 994550a7375SFelipe Balbi csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 995550a7375SFelipe Balbi if (musb_readw(regs, MUSB_TXCSR) 996550a7375SFelipe Balbi & MUSB_TXCSR_FIFONOTEMPTY) 997550a7375SFelipe Balbi csr |= MUSB_TXCSR_FLUSHFIFO; 998550a7375SFelipe Balbi if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 999550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_ISO; 1000550a7375SFelipe Balbi 1001550a7375SFelipe Balbi /* set twice in case of double buffering */ 1002550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1003550a7375SFelipe Balbi /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1004550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1005550a7375SFelipe Balbi 1006550a7375SFelipe Balbi } else { 1007550a7375SFelipe Balbi 1008550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) 1009550a7375SFelipe Balbi musb_ep->is_in = 0; 1010550a7375SFelipe Balbi if (musb_ep->is_in) 1011550a7375SFelipe Balbi goto fail; 1012f11d893dSMing Lei 1013f11d893dSMing Lei if (tmp > hw_ep->max_packet_sz_rx) { 1014b99d3659SBin Liu musb_dbg(musb, "packet size beyond hardware FIFO size"); 1015550a7375SFelipe Balbi goto fail; 1016f11d893dSMing Lei } 1017550a7375SFelipe Balbi 1018af5ec14dSSebastian Andrzej Siewior musb->intrrxe |= (1 << epnum); 1019af5ec14dSSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); 1020550a7375SFelipe Balbi 1021550a7375SFelipe Balbi /* REVISIT if can_bulk_combine() use by updating "tmp" 1022550a7375SFelipe Balbi * likewise high bandwidth periodic rx 1023550a7375SFelipe Balbi */ 10249f445cb2SCliff Cai /* Set RXMAXP with the FIFO size of the endpoint 10259f445cb2SCliff Cai * to disable double buffering mode. 10269f445cb2SCliff Cai */ 102706624818SFelipe Balbi musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz 102806624818SFelipe Balbi | (musb_ep->hb_mult << 11)); 1029550a7375SFelipe Balbi 1030550a7375SFelipe Balbi /* force shared fifo to OUT-only mode */ 1031550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) { 1032550a7375SFelipe Balbi csr = musb_readw(regs, MUSB_TXCSR); 1033550a7375SFelipe Balbi csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); 1034550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1035550a7375SFelipe Balbi } 1036550a7375SFelipe Balbi 1037550a7375SFelipe Balbi csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; 1038550a7375SFelipe Balbi if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1039550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_ISO; 1040550a7375SFelipe Balbi else if (musb_ep->type == USB_ENDPOINT_XFER_INT) 1041550a7375SFelipe Balbi csr |= MUSB_RXCSR_DISNYET; 1042550a7375SFelipe Balbi 1043550a7375SFelipe Balbi /* set twice in case of double buffering */ 1044550a7375SFelipe Balbi musb_writew(regs, MUSB_RXCSR, csr); 1045550a7375SFelipe Balbi musb_writew(regs, MUSB_RXCSR, csr); 1046550a7375SFelipe Balbi } 1047550a7375SFelipe Balbi 1048550a7375SFelipe Balbi /* NOTE: all the I/O code _should_ work fine without DMA, in case 1049550a7375SFelipe Balbi * for some reason you run out of channels here. 1050550a7375SFelipe Balbi */ 1051550a7375SFelipe Balbi if (is_dma_capable() && musb->dma_controller) { 1052550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 1053550a7375SFelipe Balbi 1054550a7375SFelipe Balbi musb_ep->dma = c->channel_alloc(c, hw_ep, 1055550a7375SFelipe Balbi (desc->bEndpointAddress & USB_DIR_IN)); 1056550a7375SFelipe Balbi } else 1057550a7375SFelipe Balbi musb_ep->dma = NULL; 1058550a7375SFelipe Balbi 1059550a7375SFelipe Balbi musb_ep->desc = desc; 1060550a7375SFelipe Balbi musb_ep->busy = 0; 106147e97605SSergei Shtylyov musb_ep->wedged = 0; 1062550a7375SFelipe Balbi status = 0; 1063550a7375SFelipe Balbi 1064550a7375SFelipe Balbi pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", 1065550a7375SFelipe Balbi musb_driver_name, musb_ep->end_point.name, 10660ccbadafSBin Liu musb_ep_xfertype_string(musb_ep->type), 1067550a7375SFelipe Balbi musb_ep->is_in ? "IN" : "OUT", 1068550a7375SFelipe Balbi musb_ep->dma ? "dma, " : "", 1069550a7375SFelipe Balbi musb_ep->packet_sz); 1070550a7375SFelipe Balbi 10712bff3916STony Lindgren schedule_delayed_work(&musb->irq_work, 0); 1072550a7375SFelipe Balbi 1073550a7375SFelipe Balbi fail: 1074550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1075550a7375SFelipe Balbi return status; 1076550a7375SFelipe Balbi } 1077550a7375SFelipe Balbi 1078550a7375SFelipe Balbi /* 1079550a7375SFelipe Balbi * Disable an endpoint flushing all requests queued. 1080550a7375SFelipe Balbi */ 1081550a7375SFelipe Balbi static int musb_gadget_disable(struct usb_ep *ep) 1082550a7375SFelipe Balbi { 1083550a7375SFelipe Balbi unsigned long flags; 1084550a7375SFelipe Balbi struct musb *musb; 1085550a7375SFelipe Balbi u8 epnum; 1086550a7375SFelipe Balbi struct musb_ep *musb_ep; 1087550a7375SFelipe Balbi void __iomem *epio; 1088550a7375SFelipe Balbi 1089550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 1090550a7375SFelipe Balbi musb = musb_ep->musb; 1091550a7375SFelipe Balbi epnum = musb_ep->current_epnum; 1092550a7375SFelipe Balbi epio = musb->endpoints[epnum].regs; 1093550a7375SFelipe Balbi 1094550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1095550a7375SFelipe Balbi musb_ep_select(musb->mregs, epnum); 1096550a7375SFelipe Balbi 1097550a7375SFelipe Balbi /* zero the endpoint sizes */ 1098550a7375SFelipe Balbi if (musb_ep->is_in) { 1099b18d26f6SSebastian Andrzej Siewior musb->intrtxe &= ~(1 << epnum); 1100b18d26f6SSebastian Andrzej Siewior musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe); 1101550a7375SFelipe Balbi musb_writew(epio, MUSB_TXMAXP, 0); 1102550a7375SFelipe Balbi } else { 1103af5ec14dSSebastian Andrzej Siewior musb->intrrxe &= ~(1 << epnum); 1104af5ec14dSSebastian Andrzej Siewior musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe); 1105550a7375SFelipe Balbi musb_writew(epio, MUSB_RXMAXP, 0); 1106550a7375SFelipe Balbi } 1107550a7375SFelipe Balbi 1108550a7375SFelipe Balbi /* abort all pending DMA and requests */ 1109550a7375SFelipe Balbi nuke(musb_ep, -ESHUTDOWN); 1110550a7375SFelipe Balbi 1111607fb0f4STal Shorer musb_ep->desc = NULL; 1112607fb0f4STal Shorer musb_ep->end_point.desc = NULL; 1113607fb0f4STal Shorer 11142bff3916STony Lindgren schedule_delayed_work(&musb->irq_work, 0); 1115550a7375SFelipe Balbi 1116550a7375SFelipe Balbi spin_unlock_irqrestore(&(musb->lock), flags); 1117550a7375SFelipe Balbi 1118b99d3659SBin Liu musb_dbg(musb, "%s", musb_ep->end_point.name); 1119550a7375SFelipe Balbi 112029e56c0cSSaurav Girepunje return 0; 1121550a7375SFelipe Balbi } 1122550a7375SFelipe Balbi 1123550a7375SFelipe Balbi /* 1124550a7375SFelipe Balbi * Allocate a request for an endpoint. 1125550a7375SFelipe Balbi * Reused by ep0 code. 1126550a7375SFelipe Balbi */ 1127550a7375SFelipe Balbi struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) 1128550a7375SFelipe Balbi { 1129550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1130550a7375SFelipe Balbi struct musb_request *request = NULL; 1131550a7375SFelipe Balbi 1132550a7375SFelipe Balbi request = kzalloc(sizeof *request, gfp_flags); 1133b99d3659SBin Liu if (!request) 11340607f862SFelipe Balbi return NULL; 11350607f862SFelipe Balbi 1136550a7375SFelipe Balbi request->request.dma = DMA_ADDR_INVALID; 1137550a7375SFelipe Balbi request->epnum = musb_ep->current_epnum; 1138550a7375SFelipe Balbi request->ep = musb_ep; 1139550a7375SFelipe Balbi 1140fc78003eSBin Liu trace_musb_req_alloc(request); 1141550a7375SFelipe Balbi return &request->request; 1142550a7375SFelipe Balbi } 1143550a7375SFelipe Balbi 1144550a7375SFelipe Balbi /* 1145550a7375SFelipe Balbi * Free a request 1146550a7375SFelipe Balbi * Reused by ep0 code. 1147550a7375SFelipe Balbi */ 1148550a7375SFelipe Balbi void musb_free_request(struct usb_ep *ep, struct usb_request *req) 1149550a7375SFelipe Balbi { 1150fc78003eSBin Liu struct musb_request *request = to_musb_request(req); 1151fc78003eSBin Liu 1152fc78003eSBin Liu trace_musb_req_free(request); 1153fc78003eSBin Liu kfree(request); 1154550a7375SFelipe Balbi } 1155550a7375SFelipe Balbi 1156550a7375SFelipe Balbi static LIST_HEAD(buffers); 1157550a7375SFelipe Balbi 1158550a7375SFelipe Balbi struct free_record { 1159550a7375SFelipe Balbi struct list_head list; 1160550a7375SFelipe Balbi struct device *dev; 1161550a7375SFelipe Balbi unsigned bytes; 1162550a7375SFelipe Balbi dma_addr_t dma; 1163550a7375SFelipe Balbi }; 1164550a7375SFelipe Balbi 1165550a7375SFelipe Balbi /* 1166550a7375SFelipe Balbi * Context: controller locked, IRQs blocked. 1167550a7375SFelipe Balbi */ 1168a666e3e6SSergei Shtylyov void musb_ep_restart(struct musb *musb, struct musb_request *req) 1169550a7375SFelipe Balbi { 1170fc78003eSBin Liu trace_musb_req_start(req); 1171550a7375SFelipe Balbi musb_ep_select(musb->mregs, req->epnum); 1172550a7375SFelipe Balbi if (req->tx) 1173550a7375SFelipe Balbi txstate(musb, req); 1174550a7375SFelipe Balbi else 1175550a7375SFelipe Balbi rxstate(musb, req); 1176550a7375SFelipe Balbi } 1177550a7375SFelipe Balbi 1178ea2f35c0STony Lindgren static int musb_ep_restart_resume_work(struct musb *musb, void *data) 1179ea2f35c0STony Lindgren { 1180ea2f35c0STony Lindgren struct musb_request *req = data; 1181ea2f35c0STony Lindgren 1182ea2f35c0STony Lindgren musb_ep_restart(musb, req); 1183ea2f35c0STony Lindgren 1184ea2f35c0STony Lindgren return 0; 1185ea2f35c0STony Lindgren } 1186ea2f35c0STony Lindgren 1187550a7375SFelipe Balbi static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1188550a7375SFelipe Balbi gfp_t gfp_flags) 1189550a7375SFelipe Balbi { 1190550a7375SFelipe Balbi struct musb_ep *musb_ep; 1191550a7375SFelipe Balbi struct musb_request *request; 1192550a7375SFelipe Balbi struct musb *musb; 1193ea2f35c0STony Lindgren int status; 1194550a7375SFelipe Balbi unsigned long lockflags; 1195550a7375SFelipe Balbi 1196550a7375SFelipe Balbi if (!ep || !req) 1197550a7375SFelipe Balbi return -EINVAL; 1198550a7375SFelipe Balbi if (!req->buf) 1199550a7375SFelipe Balbi return -ENODATA; 1200550a7375SFelipe Balbi 1201550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 1202550a7375SFelipe Balbi musb = musb_ep->musb; 1203550a7375SFelipe Balbi 1204550a7375SFelipe Balbi request = to_musb_request(req); 1205550a7375SFelipe Balbi request->musb = musb; 1206550a7375SFelipe Balbi 1207550a7375SFelipe Balbi if (request->ep != musb_ep) 1208550a7375SFelipe Balbi return -EINVAL; 1209550a7375SFelipe Balbi 1210ea2f35c0STony Lindgren status = pm_runtime_get(musb->controller); 1211ea2f35c0STony Lindgren if ((status != -EINPROGRESS) && status < 0) { 1212ea2f35c0STony Lindgren dev_err(musb->controller, 1213ea2f35c0STony Lindgren "pm runtime get failed in %s\n", 1214ea2f35c0STony Lindgren __func__); 1215ea2f35c0STony Lindgren pm_runtime_put_noidle(musb->controller); 1216ea2f35c0STony Lindgren 1217ea2f35c0STony Lindgren return status; 1218ea2f35c0STony Lindgren } 1219ea2f35c0STony Lindgren status = 0; 1220ea2f35c0STony Lindgren 1221fc78003eSBin Liu trace_musb_req_enq(request); 1222550a7375SFelipe Balbi 1223550a7375SFelipe Balbi /* request is mine now... */ 1224550a7375SFelipe Balbi request->request.actual = 0; 1225550a7375SFelipe Balbi request->request.status = -EINPROGRESS; 1226550a7375SFelipe Balbi request->epnum = musb_ep->current_epnum; 1227550a7375SFelipe Balbi request->tx = musb_ep->is_in; 1228550a7375SFelipe Balbi 1229c65bfa62SMian Yousaf Kaukab map_dma_buffer(request, musb, musb_ep); 1230550a7375SFelipe Balbi 1231550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, lockflags); 1232550a7375SFelipe Balbi 1233550a7375SFelipe Balbi /* don't queue if the ep is down */ 1234550a7375SFelipe Balbi if (!musb_ep->desc) { 1235b99d3659SBin Liu musb_dbg(musb, "req %p queued to %s while ep %s", 1236550a7375SFelipe Balbi req, ep->name, "disabled"); 1237550a7375SFelipe Balbi status = -ESHUTDOWN; 123823a53d90SSebastian Andrzej Siewior unmap_dma_buffer(request, musb); 123923a53d90SSebastian Andrzej Siewior goto unlock; 1240550a7375SFelipe Balbi } 1241550a7375SFelipe Balbi 1242550a7375SFelipe Balbi /* add request to the list */ 1243ad1adb89SFelipe Balbi list_add_tail(&request->list, &musb_ep->req_list); 1244550a7375SFelipe Balbi 1245550a7375SFelipe Balbi /* it this is the head of the queue, start i/o ... */ 1246ea2f35c0STony Lindgren if (!musb_ep->busy && &request->list == musb_ep->req_list.next) { 1247ea2f35c0STony Lindgren status = musb_queue_resume_work(musb, 1248ea2f35c0STony Lindgren musb_ep_restart_resume_work, 1249ea2f35c0STony Lindgren request); 1250ea2f35c0STony Lindgren if (status < 0) 1251ea2f35c0STony Lindgren dev_err(musb->controller, "%s resume work: %i\n", 1252ea2f35c0STony Lindgren __func__, status); 1253ea2f35c0STony Lindgren } 1254550a7375SFelipe Balbi 125523a53d90SSebastian Andrzej Siewior unlock: 1256550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, lockflags); 1257cacaaf80STony Lindgren pm_runtime_mark_last_busy(musb->controller); 1258cacaaf80STony Lindgren pm_runtime_put_autosuspend(musb->controller); 1259cacaaf80STony Lindgren 1260550a7375SFelipe Balbi return status; 1261550a7375SFelipe Balbi } 1262550a7375SFelipe Balbi 1263550a7375SFelipe Balbi static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) 1264550a7375SFelipe Balbi { 1265550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 12664cbbf084SFelipe Balbi struct musb_request *req = to_musb_request(request); 12674cbbf084SFelipe Balbi struct musb_request *r; 1268550a7375SFelipe Balbi unsigned long flags; 1269550a7375SFelipe Balbi int status = 0; 1270550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1271550a7375SFelipe Balbi 1272fc78003eSBin Liu if (!ep || !request || req->ep != musb_ep) 1273550a7375SFelipe Balbi return -EINVAL; 1274550a7375SFelipe Balbi 1275fc78003eSBin Liu trace_musb_req_deq(req); 1276fc78003eSBin Liu 1277550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1278550a7375SFelipe Balbi 1279550a7375SFelipe Balbi list_for_each_entry(r, &musb_ep->req_list, list) { 12804cbbf084SFelipe Balbi if (r == req) 1281550a7375SFelipe Balbi break; 1282550a7375SFelipe Balbi } 12834cbbf084SFelipe Balbi if (r != req) { 1284b99d3659SBin Liu dev_err(musb->controller, "request %p not queued to %s\n", 1285b99d3659SBin Liu request, ep->name); 1286550a7375SFelipe Balbi status = -EINVAL; 1287550a7375SFelipe Balbi goto done; 1288550a7375SFelipe Balbi } 1289550a7375SFelipe Balbi 1290550a7375SFelipe Balbi /* if the hardware doesn't have the request, easy ... */ 12913d5ad13eSFelipe Balbi if (musb_ep->req_list.next != &req->list || musb_ep->busy) 1292550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1293550a7375SFelipe Balbi 1294550a7375SFelipe Balbi /* ... else abort the dma transfer ... */ 1295550a7375SFelipe Balbi else if (is_dma_capable() && musb_ep->dma) { 1296550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 1297550a7375SFelipe Balbi 1298550a7375SFelipe Balbi musb_ep_select(musb->mregs, musb_ep->current_epnum); 1299550a7375SFelipe Balbi if (c->channel_abort) 1300550a7375SFelipe Balbi status = c->channel_abort(musb_ep->dma); 1301550a7375SFelipe Balbi else 1302550a7375SFelipe Balbi status = -EBUSY; 1303550a7375SFelipe Balbi if (status == 0) 1304550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1305550a7375SFelipe Balbi } else { 1306550a7375SFelipe Balbi /* NOTE: by sticking to easily tested hardware/driver states, 1307550a7375SFelipe Balbi * we leave counting of in-flight packets imprecise. 1308550a7375SFelipe Balbi */ 1309550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, -ECONNRESET); 1310550a7375SFelipe Balbi } 1311550a7375SFelipe Balbi 1312550a7375SFelipe Balbi done: 1313550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1314550a7375SFelipe Balbi return status; 1315550a7375SFelipe Balbi } 1316550a7375SFelipe Balbi 1317550a7375SFelipe Balbi /* 1318c1aa81daSGeert Uytterhoeven * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any 1319550a7375SFelipe Balbi * data but will queue requests. 1320550a7375SFelipe Balbi * 1321550a7375SFelipe Balbi * exported to ep0 code 1322550a7375SFelipe Balbi */ 13231b6c3b0fSFelipe Balbi static int musb_gadget_set_halt(struct usb_ep *ep, int value) 1324550a7375SFelipe Balbi { 1325550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1326550a7375SFelipe Balbi u8 epnum = musb_ep->current_epnum; 1327550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1328550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 1329550a7375SFelipe Balbi void __iomem *mbase; 1330550a7375SFelipe Balbi unsigned long flags; 1331550a7375SFelipe Balbi u16 csr; 1332cea83241SSergei Shtylyov struct musb_request *request; 1333550a7375SFelipe Balbi int status = 0; 1334550a7375SFelipe Balbi 1335550a7375SFelipe Balbi if (!ep) 1336550a7375SFelipe Balbi return -EINVAL; 1337550a7375SFelipe Balbi mbase = musb->mregs; 1338550a7375SFelipe Balbi 1339550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1340550a7375SFelipe Balbi 1341550a7375SFelipe Balbi if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { 1342550a7375SFelipe Balbi status = -EINVAL; 1343550a7375SFelipe Balbi goto done; 1344550a7375SFelipe Balbi } 1345550a7375SFelipe Balbi 1346550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 1347550a7375SFelipe Balbi 1348ad1adb89SFelipe Balbi request = next_request(musb_ep); 1349cea83241SSergei Shtylyov if (value) { 1350cea83241SSergei Shtylyov if (request) { 1351b99d3659SBin Liu musb_dbg(musb, "request in progress, cannot halt %s", 1352cea83241SSergei Shtylyov ep->name); 1353cea83241SSergei Shtylyov status = -EAGAIN; 1354cea83241SSergei Shtylyov goto done; 1355cea83241SSergei Shtylyov } 1356cea83241SSergei Shtylyov /* Cannot portably stall with non-empty FIFO */ 1357cea83241SSergei Shtylyov if (musb_ep->is_in) { 1358550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 1359550a7375SFelipe Balbi if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1360b99d3659SBin Liu musb_dbg(musb, "FIFO busy, cannot halt %s", 1361b99d3659SBin Liu ep->name); 1362cea83241SSergei Shtylyov status = -EAGAIN; 1363cea83241SSergei Shtylyov goto done; 1364550a7375SFelipe Balbi } 1365cea83241SSergei Shtylyov } 136647e97605SSergei Shtylyov } else 136747e97605SSergei Shtylyov musb_ep->wedged = 0; 1368550a7375SFelipe Balbi 1369550a7375SFelipe Balbi /* set/clear the stall and toggle bits */ 1370b99d3659SBin Liu musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear"); 1371550a7375SFelipe Balbi if (musb_ep->is_in) { 1372550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 1373550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_WZC_BITS 1374550a7375SFelipe Balbi | MUSB_TXCSR_CLRDATATOG; 1375550a7375SFelipe Balbi if (value) 1376550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_SENDSTALL; 1377550a7375SFelipe Balbi else 1378550a7375SFelipe Balbi csr &= ~(MUSB_TXCSR_P_SENDSTALL 1379550a7375SFelipe Balbi | MUSB_TXCSR_P_SENTSTALL); 1380550a7375SFelipe Balbi csr &= ~MUSB_TXCSR_TXPKTRDY; 1381550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 1382550a7375SFelipe Balbi } else { 1383550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 1384550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS 1385550a7375SFelipe Balbi | MUSB_RXCSR_FLUSHFIFO 1386550a7375SFelipe Balbi | MUSB_RXCSR_CLRDATATOG; 1387550a7375SFelipe Balbi if (value) 1388550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_SENDSTALL; 1389550a7375SFelipe Balbi else 1390550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_P_SENDSTALL 1391550a7375SFelipe Balbi | MUSB_RXCSR_P_SENTSTALL); 1392550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1393550a7375SFelipe Balbi } 1394550a7375SFelipe Balbi 1395550a7375SFelipe Balbi /* maybe start the first request in the queue */ 1396550a7375SFelipe Balbi if (!musb_ep->busy && !value && request) { 1397b99d3659SBin Liu musb_dbg(musb, "restarting the request"); 1398550a7375SFelipe Balbi musb_ep_restart(musb, request); 1399550a7375SFelipe Balbi } 1400550a7375SFelipe Balbi 1401cea83241SSergei Shtylyov done: 1402550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1403550a7375SFelipe Balbi return status; 1404550a7375SFelipe Balbi } 1405550a7375SFelipe Balbi 140647e97605SSergei Shtylyov /* 140747e97605SSergei Shtylyov * Sets the halt feature with the clear requests ignored 140847e97605SSergei Shtylyov */ 14091b6c3b0fSFelipe Balbi static int musb_gadget_set_wedge(struct usb_ep *ep) 141047e97605SSergei Shtylyov { 141147e97605SSergei Shtylyov struct musb_ep *musb_ep = to_musb_ep(ep); 141247e97605SSergei Shtylyov 141347e97605SSergei Shtylyov if (!ep) 141447e97605SSergei Shtylyov return -EINVAL; 141547e97605SSergei Shtylyov 141647e97605SSergei Shtylyov musb_ep->wedged = 1; 141747e97605SSergei Shtylyov 141847e97605SSergei Shtylyov return usb_ep_set_halt(ep); 141947e97605SSergei Shtylyov } 142047e97605SSergei Shtylyov 1421550a7375SFelipe Balbi static int musb_gadget_fifo_status(struct usb_ep *ep) 1422550a7375SFelipe Balbi { 1423550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1424550a7375SFelipe Balbi void __iomem *epio = musb_ep->hw_ep->regs; 1425550a7375SFelipe Balbi int retval = -EINVAL; 1426550a7375SFelipe Balbi 1427550a7375SFelipe Balbi if (musb_ep->desc && !musb_ep->is_in) { 1428550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1429550a7375SFelipe Balbi int epnum = musb_ep->current_epnum; 1430550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 1431550a7375SFelipe Balbi unsigned long flags; 1432550a7375SFelipe Balbi 1433550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1434550a7375SFelipe Balbi 1435550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 1436550a7375SFelipe Balbi /* FIXME return zero unless RXPKTRDY is set */ 1437550a7375SFelipe Balbi retval = musb_readw(epio, MUSB_RXCOUNT); 1438550a7375SFelipe Balbi 1439550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1440550a7375SFelipe Balbi } 1441550a7375SFelipe Balbi return retval; 1442550a7375SFelipe Balbi } 1443550a7375SFelipe Balbi 1444550a7375SFelipe Balbi static void musb_gadget_fifo_flush(struct usb_ep *ep) 1445550a7375SFelipe Balbi { 1446550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 1447550a7375SFelipe Balbi struct musb *musb = musb_ep->musb; 1448550a7375SFelipe Balbi u8 epnum = musb_ep->current_epnum; 1449550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 1450550a7375SFelipe Balbi void __iomem *mbase; 1451550a7375SFelipe Balbi unsigned long flags; 1452b18d26f6SSebastian Andrzej Siewior u16 csr; 1453550a7375SFelipe Balbi 1454550a7375SFelipe Balbi mbase = musb->mregs; 1455550a7375SFelipe Balbi 1456550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1457550a7375SFelipe Balbi musb_ep_select(mbase, (u8) epnum); 1458550a7375SFelipe Balbi 1459550a7375SFelipe Balbi /* disable interrupts */ 1460b18d26f6SSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum)); 1461550a7375SFelipe Balbi 1462550a7375SFelipe Balbi if (musb_ep->is_in) { 1463550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_TXCSR); 1464550a7375SFelipe Balbi if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1465550a7375SFelipe Balbi csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; 14664858f06eSYauheni Kaliuta /* 14674858f06eSYauheni Kaliuta * Setting both TXPKTRDY and FLUSHFIFO makes controller 14684858f06eSYauheni Kaliuta * to interrupt current FIFO loading, but not flushing 14694858f06eSYauheni Kaliuta * the already loaded ones. 14704858f06eSYauheni Kaliuta */ 14714858f06eSYauheni Kaliuta csr &= ~MUSB_TXCSR_TXPKTRDY; 1472550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 1473550a7375SFelipe Balbi /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1474550a7375SFelipe Balbi musb_writew(epio, MUSB_TXCSR, csr); 1475550a7375SFelipe Balbi } 1476550a7375SFelipe Balbi } else { 1477550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 1478550a7375SFelipe Balbi csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; 1479550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1480550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 1481550a7375SFelipe Balbi } 1482550a7375SFelipe Balbi 1483550a7375SFelipe Balbi /* re-enable interrupt */ 1484b18d26f6SSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); 1485550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1486550a7375SFelipe Balbi } 1487550a7375SFelipe Balbi 1488550a7375SFelipe Balbi static const struct usb_ep_ops musb_ep_ops = { 1489550a7375SFelipe Balbi .enable = musb_gadget_enable, 1490550a7375SFelipe Balbi .disable = musb_gadget_disable, 1491550a7375SFelipe Balbi .alloc_request = musb_alloc_request, 1492550a7375SFelipe Balbi .free_request = musb_free_request, 1493550a7375SFelipe Balbi .queue = musb_gadget_queue, 1494550a7375SFelipe Balbi .dequeue = musb_gadget_dequeue, 1495550a7375SFelipe Balbi .set_halt = musb_gadget_set_halt, 149647e97605SSergei Shtylyov .set_wedge = musb_gadget_set_wedge, 1497550a7375SFelipe Balbi .fifo_status = musb_gadget_fifo_status, 1498550a7375SFelipe Balbi .fifo_flush = musb_gadget_fifo_flush 1499550a7375SFelipe Balbi }; 1500550a7375SFelipe Balbi 1501550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1502550a7375SFelipe Balbi 1503550a7375SFelipe Balbi static int musb_gadget_get_frame(struct usb_gadget *gadget) 1504550a7375SFelipe Balbi { 1505550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1506550a7375SFelipe Balbi 1507550a7375SFelipe Balbi return (int)musb_readw(musb->mregs, MUSB_FRAME); 1508550a7375SFelipe Balbi } 1509550a7375SFelipe Balbi 1510550a7375SFelipe Balbi static int musb_gadget_wakeup(struct usb_gadget *gadget) 1511550a7375SFelipe Balbi { 1512550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1513550a7375SFelipe Balbi void __iomem *mregs = musb->mregs; 1514550a7375SFelipe Balbi unsigned long flags; 1515550a7375SFelipe Balbi int status = -EINVAL; 1516550a7375SFelipe Balbi u8 power, devctl; 1517550a7375SFelipe Balbi int retries; 1518550a7375SFelipe Balbi 1519550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1520550a7375SFelipe Balbi 1521e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 1522550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1523550a7375SFelipe Balbi /* NOTE: OTG state machine doesn't include B_SUSPENDED; 1524550a7375SFelipe Balbi * that's part of the standard usb 1.1 state machine, and 1525550a7375SFelipe Balbi * doesn't affect OTG transitions. 1526550a7375SFelipe Balbi */ 1527550a7375SFelipe Balbi if (musb->may_wakeup && musb->is_suspended) 1528550a7375SFelipe Balbi break; 1529550a7375SFelipe Balbi goto done; 1530550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1531550a7375SFelipe Balbi /* Start SRP ... OTG not required. */ 1532550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1533b99d3659SBin Liu musb_dbg(musb, "Sending SRP: devctl: %02x", devctl); 1534550a7375SFelipe Balbi devctl |= MUSB_DEVCTL_SESSION; 1535550a7375SFelipe Balbi musb_writeb(mregs, MUSB_DEVCTL, devctl); 1536550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1537550a7375SFelipe Balbi retries = 100; 1538550a7375SFelipe Balbi while (!(devctl & MUSB_DEVCTL_SESSION)) { 1539550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1540550a7375SFelipe Balbi if (retries-- < 1) 1541550a7375SFelipe Balbi break; 1542550a7375SFelipe Balbi } 1543550a7375SFelipe Balbi retries = 10000; 1544550a7375SFelipe Balbi while (devctl & MUSB_DEVCTL_SESSION) { 1545550a7375SFelipe Balbi devctl = musb_readb(mregs, MUSB_DEVCTL); 1546550a7375SFelipe Balbi if (retries-- < 1) 1547550a7375SFelipe Balbi break; 1548550a7375SFelipe Balbi } 1549550a7375SFelipe Balbi 15508620543eSHema HK spin_unlock_irqrestore(&musb->lock, flags); 15516e13c650SHeikki Krogerus otg_start_srp(musb->xceiv->otg); 15528620543eSHema HK spin_lock_irqsave(&musb->lock, flags); 15538620543eSHema HK 1554550a7375SFelipe Balbi /* Block idling for at least 1s */ 1555550a7375SFelipe Balbi musb_platform_try_idle(musb, 1556550a7375SFelipe Balbi jiffies + msecs_to_jiffies(1 * HZ)); 1557550a7375SFelipe Balbi 1558550a7375SFelipe Balbi status = 0; 1559550a7375SFelipe Balbi goto done; 1560550a7375SFelipe Balbi default: 1561b99d3659SBin Liu musb_dbg(musb, "Unhandled wake: %s", 1562e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 1563550a7375SFelipe Balbi goto done; 1564550a7375SFelipe Balbi } 1565550a7375SFelipe Balbi 1566550a7375SFelipe Balbi status = 0; 1567550a7375SFelipe Balbi 1568550a7375SFelipe Balbi power = musb_readb(mregs, MUSB_POWER); 1569550a7375SFelipe Balbi power |= MUSB_POWER_RESUME; 1570550a7375SFelipe Balbi musb_writeb(mregs, MUSB_POWER, power); 1571b99d3659SBin Liu musb_dbg(musb, "issue wakeup"); 1572550a7375SFelipe Balbi 1573550a7375SFelipe Balbi /* FIXME do this next chunk in a timer callback, no udelay */ 1574550a7375SFelipe Balbi mdelay(2); 1575550a7375SFelipe Balbi 1576550a7375SFelipe Balbi power = musb_readb(mregs, MUSB_POWER); 1577550a7375SFelipe Balbi power &= ~MUSB_POWER_RESUME; 1578550a7375SFelipe Balbi musb_writeb(mregs, MUSB_POWER, power); 1579550a7375SFelipe Balbi done: 1580550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1581550a7375SFelipe Balbi return status; 1582550a7375SFelipe Balbi } 1583550a7375SFelipe Balbi 1584550a7375SFelipe Balbi static int 1585550a7375SFelipe Balbi musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) 1586550a7375SFelipe Balbi { 1587dadac986SPeter Chen gadget->is_selfpowered = !!is_selfpowered; 1588550a7375SFelipe Balbi return 0; 1589550a7375SFelipe Balbi } 1590550a7375SFelipe Balbi 1591550a7375SFelipe Balbi static void musb_pullup(struct musb *musb, int is_on) 1592550a7375SFelipe Balbi { 1593550a7375SFelipe Balbi u8 power; 1594550a7375SFelipe Balbi 1595550a7375SFelipe Balbi power = musb_readb(musb->mregs, MUSB_POWER); 1596550a7375SFelipe Balbi if (is_on) 1597550a7375SFelipe Balbi power |= MUSB_POWER_SOFTCONN; 1598550a7375SFelipe Balbi else 1599550a7375SFelipe Balbi power &= ~MUSB_POWER_SOFTCONN; 1600550a7375SFelipe Balbi 1601550a7375SFelipe Balbi /* FIXME if on, HdrcStart; if off, HdrcStop */ 1602550a7375SFelipe Balbi 1603b99d3659SBin Liu musb_dbg(musb, "gadget D+ pullup %s", 1604e71eb392SSebastian Andrzej Siewior is_on ? "on" : "off"); 1605550a7375SFelipe Balbi musb_writeb(musb->mregs, MUSB_POWER, power); 1606550a7375SFelipe Balbi } 1607550a7375SFelipe Balbi 1608550a7375SFelipe Balbi #if 0 1609550a7375SFelipe Balbi static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) 1610550a7375SFelipe Balbi { 1611b99d3659SBin Liu musb_dbg(musb, "<= %s =>\n", __func__); 1612550a7375SFelipe Balbi 1613550a7375SFelipe Balbi /* 1614550a7375SFelipe Balbi * FIXME iff driver's softconnect flag is set (as it is during probe, 1615550a7375SFelipe Balbi * though that can clear it), just musb_pullup(). 1616550a7375SFelipe Balbi */ 1617550a7375SFelipe Balbi 1618550a7375SFelipe Balbi return -EINVAL; 1619550a7375SFelipe Balbi } 1620550a7375SFelipe Balbi #endif 1621550a7375SFelipe Balbi 1622550a7375SFelipe Balbi static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) 1623550a7375SFelipe Balbi { 1624550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1625550a7375SFelipe Balbi 162684e250ffSDavid Brownell if (!musb->xceiv->set_power) 1627550a7375SFelipe Balbi return -EOPNOTSUPP; 1628b96d3b08SHeikki Krogerus return usb_phy_set_power(musb->xceiv, mA); 1629550a7375SFelipe Balbi } 1630550a7375SFelipe Balbi 1631517bafffSTony Lindgren static void musb_gadget_work(struct work_struct *work) 1632517bafffSTony Lindgren { 1633517bafffSTony Lindgren struct musb *musb; 1634517bafffSTony Lindgren unsigned long flags; 1635517bafffSTony Lindgren 1636517bafffSTony Lindgren musb = container_of(work, struct musb, gadget_work.work); 1637517bafffSTony Lindgren pm_runtime_get_sync(musb->controller); 1638517bafffSTony Lindgren spin_lock_irqsave(&musb->lock, flags); 1639517bafffSTony Lindgren musb_pullup(musb, musb->softconnect); 1640517bafffSTony Lindgren spin_unlock_irqrestore(&musb->lock, flags); 1641517bafffSTony Lindgren pm_runtime_mark_last_busy(musb->controller); 1642517bafffSTony Lindgren pm_runtime_put_autosuspend(musb->controller); 1643517bafffSTony Lindgren } 1644517bafffSTony Lindgren 1645550a7375SFelipe Balbi static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1646550a7375SFelipe Balbi { 1647550a7375SFelipe Balbi struct musb *musb = gadget_to_musb(gadget); 1648550a7375SFelipe Balbi unsigned long flags; 1649550a7375SFelipe Balbi 1650550a7375SFelipe Balbi is_on = !!is_on; 1651550a7375SFelipe Balbi 1652550a7375SFelipe Balbi /* NOTE: this assumes we are sensing vbus; we'd rather 1653550a7375SFelipe Balbi * not pullup unless the B-session is active. 1654550a7375SFelipe Balbi */ 1655550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1656550a7375SFelipe Balbi if (is_on != musb->softconnect) { 1657550a7375SFelipe Balbi musb->softconnect = is_on; 1658517bafffSTony Lindgren schedule_delayed_work(&musb->gadget_work, 0); 1659550a7375SFelipe Balbi } 1660550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 166193e098a8SJohn Stultz 1662550a7375SFelipe Balbi return 0; 1663550a7375SFelipe Balbi } 1664550a7375SFelipe Balbi 1665e71eb392SSebastian Andrzej Siewior static int musb_gadget_start(struct usb_gadget *g, 1666e71eb392SSebastian Andrzej Siewior struct usb_gadget_driver *driver); 166722835b80SFelipe Balbi static int musb_gadget_stop(struct usb_gadget *g); 16680f91349bSSebastian Andrzej Siewior 1669550a7375SFelipe Balbi static const struct usb_gadget_ops musb_gadget_operations = { 1670550a7375SFelipe Balbi .get_frame = musb_gadget_get_frame, 1671550a7375SFelipe Balbi .wakeup = musb_gadget_wakeup, 1672550a7375SFelipe Balbi .set_selfpowered = musb_gadget_set_self_powered, 1673550a7375SFelipe Balbi /* .vbus_session = musb_gadget_vbus_session, */ 1674550a7375SFelipe Balbi .vbus_draw = musb_gadget_vbus_draw, 1675550a7375SFelipe Balbi .pullup = musb_gadget_pullup, 1676e71eb392SSebastian Andrzej Siewior .udc_start = musb_gadget_start, 1677e71eb392SSebastian Andrzej Siewior .udc_stop = musb_gadget_stop, 1678550a7375SFelipe Balbi }; 1679550a7375SFelipe Balbi 1680550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1681550a7375SFelipe Balbi 1682550a7375SFelipe Balbi /* Registration */ 1683550a7375SFelipe Balbi 1684550a7375SFelipe Balbi /* Only this registration code "knows" the rule (from USB standards) 1685550a7375SFelipe Balbi * about there being only one external upstream port. It assumes 1686550a7375SFelipe Balbi * all peripheral ports are external... 1687550a7375SFelipe Balbi */ 1688550a7375SFelipe Balbi 168941ac7b3aSBill Pemberton static void 1690550a7375SFelipe Balbi init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) 1691550a7375SFelipe Balbi { 1692550a7375SFelipe Balbi struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1693550a7375SFelipe Balbi 1694550a7375SFelipe Balbi memset(ep, 0, sizeof *ep); 1695550a7375SFelipe Balbi 1696550a7375SFelipe Balbi ep->current_epnum = epnum; 1697550a7375SFelipe Balbi ep->musb = musb; 1698550a7375SFelipe Balbi ep->hw_ep = hw_ep; 1699550a7375SFelipe Balbi ep->is_in = is_in; 1700550a7375SFelipe Balbi 1701550a7375SFelipe Balbi INIT_LIST_HEAD(&ep->req_list); 1702550a7375SFelipe Balbi 1703550a7375SFelipe Balbi sprintf(ep->name, "ep%d%s", epnum, 1704550a7375SFelipe Balbi (!epnum || hw_ep->is_shared_fifo) ? "" : ( 1705550a7375SFelipe Balbi is_in ? "in" : "out")); 1706550a7375SFelipe Balbi ep->end_point.name = ep->name; 1707550a7375SFelipe Balbi INIT_LIST_HEAD(&ep->end_point.ep_list); 1708550a7375SFelipe Balbi if (!epnum) { 1709e117e742SRobert Baldyga usb_ep_set_maxpacket_limit(&ep->end_point, 64); 17108501955eSRobert Baldyga ep->end_point.caps.type_control = true; 1711550a7375SFelipe Balbi ep->end_point.ops = &musb_g_ep0_ops; 1712550a7375SFelipe Balbi musb->g.ep0 = &ep->end_point; 1713550a7375SFelipe Balbi } else { 1714550a7375SFelipe Balbi if (is_in) 1715e117e742SRobert Baldyga usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx); 1716550a7375SFelipe Balbi else 1717e117e742SRobert Baldyga usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx); 17188501955eSRobert Baldyga ep->end_point.caps.type_iso = true; 17198501955eSRobert Baldyga ep->end_point.caps.type_bulk = true; 17208501955eSRobert Baldyga ep->end_point.caps.type_int = true; 1721550a7375SFelipe Balbi ep->end_point.ops = &musb_ep_ops; 1722550a7375SFelipe Balbi list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); 1723550a7375SFelipe Balbi } 17248501955eSRobert Baldyga 17258501955eSRobert Baldyga if (!epnum || hw_ep->is_shared_fifo) { 17268501955eSRobert Baldyga ep->end_point.caps.dir_in = true; 17278501955eSRobert Baldyga ep->end_point.caps.dir_out = true; 17288501955eSRobert Baldyga } else if (is_in) 17298501955eSRobert Baldyga ep->end_point.caps.dir_in = true; 17308501955eSRobert Baldyga else 17318501955eSRobert Baldyga ep->end_point.caps.dir_out = true; 1732550a7375SFelipe Balbi } 1733550a7375SFelipe Balbi 1734550a7375SFelipe Balbi /* 1735550a7375SFelipe Balbi * Initialize the endpoints exposed to peripheral drivers, with backlinks 1736550a7375SFelipe Balbi * to the rest of the driver state. 1737550a7375SFelipe Balbi */ 173841ac7b3aSBill Pemberton static inline void musb_g_init_endpoints(struct musb *musb) 1739550a7375SFelipe Balbi { 1740550a7375SFelipe Balbi u8 epnum; 1741550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 1742550a7375SFelipe Balbi unsigned count = 0; 1743550a7375SFelipe Balbi 1744b595076aSUwe Kleine-König /* initialize endpoint list just once */ 1745550a7375SFelipe Balbi INIT_LIST_HEAD(&(musb->g.ep_list)); 1746550a7375SFelipe Balbi 1747550a7375SFelipe Balbi for (epnum = 0, hw_ep = musb->endpoints; 1748550a7375SFelipe Balbi epnum < musb->nr_endpoints; 1749550a7375SFelipe Balbi epnum++, hw_ep++) { 1750550a7375SFelipe Balbi if (hw_ep->is_shared_fifo /* || !epnum */) { 1751550a7375SFelipe Balbi init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); 1752550a7375SFelipe Balbi count++; 1753550a7375SFelipe Balbi } else { 1754550a7375SFelipe Balbi if (hw_ep->max_packet_sz_tx) { 1755550a7375SFelipe Balbi init_peripheral_ep(musb, &hw_ep->ep_in, 1756550a7375SFelipe Balbi epnum, 1); 1757550a7375SFelipe Balbi count++; 1758550a7375SFelipe Balbi } 1759550a7375SFelipe Balbi if (hw_ep->max_packet_sz_rx) { 1760550a7375SFelipe Balbi init_peripheral_ep(musb, &hw_ep->ep_out, 1761550a7375SFelipe Balbi epnum, 0); 1762550a7375SFelipe Balbi count++; 1763550a7375SFelipe Balbi } 1764550a7375SFelipe Balbi } 1765550a7375SFelipe Balbi } 1766550a7375SFelipe Balbi } 1767550a7375SFelipe Balbi 1768550a7375SFelipe Balbi /* called once during driver setup to initialize and link into 1769550a7375SFelipe Balbi * the driver model; memory is zeroed. 1770550a7375SFelipe Balbi */ 177141ac7b3aSBill Pemberton int musb_gadget_setup(struct musb *musb) 1772550a7375SFelipe Balbi { 1773550a7375SFelipe Balbi int status; 1774550a7375SFelipe Balbi 1775550a7375SFelipe Balbi /* REVISIT minor race: if (erroneously) setting up two 1776550a7375SFelipe Balbi * musb peripherals at the same time, only the bus lock 1777550a7375SFelipe Balbi * is probably held. 1778550a7375SFelipe Balbi */ 1779550a7375SFelipe Balbi 1780550a7375SFelipe Balbi musb->g.ops = &musb_gadget_operations; 1781d327ab5bSMichal Nazarewicz musb->g.max_speed = USB_SPEED_HIGH; 1782550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 1783550a7375SFelipe Balbi 17841374a430SBin Liu MUSB_DEV_MODE(musb); 1785e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_IDLE; 17861374a430SBin Liu 1787550a7375SFelipe Balbi /* this "gadget" abstracts/virtualizes the controller */ 1788550a7375SFelipe Balbi musb->g.name = musb_driver_name; 17890a9134bdSBin Liu /* don't support otg protocols */ 1790fd3923a9SApelete Seketeli musb->g.is_otg = 0; 1791517bafffSTony Lindgren INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work); 1792550a7375SFelipe Balbi musb_g_init_endpoints(musb); 1793550a7375SFelipe Balbi 1794550a7375SFelipe Balbi musb->is_active = 0; 1795550a7375SFelipe Balbi musb_platform_try_idle(musb, 0); 1796550a7375SFelipe Balbi 17970f91349bSSebastian Andrzej Siewior status = usb_add_gadget_udc(musb->controller, &musb->g); 17980f91349bSSebastian Andrzej Siewior if (status) 17990f91349bSSebastian Andrzej Siewior goto err; 18000f91349bSSebastian Andrzej Siewior 18010f91349bSSebastian Andrzej Siewior return 0; 18020f91349bSSebastian Andrzej Siewior err: 18036193d699SSebastian Andrzej Siewior musb->g.dev.parent = NULL; 18040f91349bSSebastian Andrzej Siewior device_unregister(&musb->g.dev); 1805550a7375SFelipe Balbi return status; 1806550a7375SFelipe Balbi } 1807550a7375SFelipe Balbi 1808550a7375SFelipe Balbi void musb_gadget_cleanup(struct musb *musb) 1809550a7375SFelipe Balbi { 18107ad76955SBin Liu if (musb->port_mode == MUSB_HOST) 181190474288SSebastian Andrzej Siewior return; 1812517bafffSTony Lindgren 1813517bafffSTony Lindgren cancel_delayed_work_sync(&musb->gadget_work); 18140f91349bSSebastian Andrzej Siewior usb_del_gadget_udc(&musb->g); 1815550a7375SFelipe Balbi } 1816550a7375SFelipe Balbi 1817550a7375SFelipe Balbi /* 1818550a7375SFelipe Balbi * Register the gadget driver. Used by gadget drivers when 1819550a7375SFelipe Balbi * registering themselves with the controller. 1820550a7375SFelipe Balbi * 1821550a7375SFelipe Balbi * -EINVAL something went wrong (not driver) 1822550a7375SFelipe Balbi * -EBUSY another gadget is already using the controller 1823b595076aSUwe Kleine-König * -ENOMEM no memory to perform the operation 1824550a7375SFelipe Balbi * 1825550a7375SFelipe Balbi * @param driver the gadget driver 1826550a7375SFelipe Balbi * @return <0 if error, 0 if everything is fine 1827550a7375SFelipe Balbi */ 1828e71eb392SSebastian Andrzej Siewior static int musb_gadget_start(struct usb_gadget *g, 1829e71eb392SSebastian Andrzej Siewior struct usb_gadget_driver *driver) 1830550a7375SFelipe Balbi { 1831e71eb392SSebastian Andrzej Siewior struct musb *musb = gadget_to_musb(g); 1832d445b6daSHeikki Krogerus struct usb_otg *otg = musb->xceiv->otg; 183363eed2b5SFelipe Balbi unsigned long flags; 1834032ec49fSFelipe Balbi int retval = 0; 1835550a7375SFelipe Balbi 1836032ec49fSFelipe Balbi if (driver->max_speed < USB_SPEED_HIGH) { 1837032ec49fSFelipe Balbi retval = -EINVAL; 1838032ec49fSFelipe Balbi goto err; 1839032ec49fSFelipe Balbi } 1840550a7375SFelipe Balbi 18417acc6197SHema HK pm_runtime_get_sync(musb->controller); 18427acc6197SHema HK 1843e71eb392SSebastian Andrzej Siewior musb->softconnect = 0; 1844550a7375SFelipe Balbi musb->gadget_driver = driver; 1845550a7375SFelipe Balbi 1846550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 184743e699ceSGreg Kroah-Hartman musb->is_active = 1; 1848550a7375SFelipe Balbi 18496e13c650SHeikki Krogerus otg_set_peripheral(otg, &musb->g); 1850e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_IDLE; 1851550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1852550a7375SFelipe Balbi 1853001dd84aSSebastian Andrzej Siewior musb_start(musb); 1854001dd84aSSebastian Andrzej Siewior 1855550a7375SFelipe Balbi /* REVISIT: funcall to other code, which also 1856550a7375SFelipe Balbi * handles power budgeting ... this way also 1857550a7375SFelipe Balbi * ensures HdrcStart is indirectly called. 1858550a7375SFelipe Balbi */ 1859b65ae0f1SGrazvydas Ignotas if (musb->xceiv->last_event == USB_EVENT_ID) 1860b65ae0f1SGrazvydas Ignotas musb_platform_set_vbus(musb, 1); 1861550a7375SFelipe Balbi 186230647217STony Lindgren pm_runtime_mark_last_busy(musb->controller); 186330647217STony Lindgren pm_runtime_put_autosuspend(musb->controller); 18647acc6197SHema HK 186563eed2b5SFelipe Balbi return 0; 186663eed2b5SFelipe Balbi 1867032ec49fSFelipe Balbi err: 1868550a7375SFelipe Balbi return retval; 1869550a7375SFelipe Balbi } 1870550a7375SFelipe Balbi 1871550a7375SFelipe Balbi /* 1872550a7375SFelipe Balbi * Unregister the gadget driver. Used by gadget drivers when 1873550a7375SFelipe Balbi * unregistering themselves from the controller. 1874550a7375SFelipe Balbi * 1875550a7375SFelipe Balbi * @param driver the gadget driver to unregister 1876550a7375SFelipe Balbi */ 187722835b80SFelipe Balbi static int musb_gadget_stop(struct usb_gadget *g) 1878550a7375SFelipe Balbi { 1879e71eb392SSebastian Andrzej Siewior struct musb *musb = gadget_to_musb(g); 188063eed2b5SFelipe Balbi unsigned long flags; 1881550a7375SFelipe Balbi 18827acc6197SHema HK pm_runtime_get_sync(musb->controller); 18837acc6197SHema HK 188463eed2b5SFelipe Balbi /* 188563eed2b5SFelipe Balbi * REVISIT always use otg_set_peripheral() here too; 1886550a7375SFelipe Balbi * this needs to shut down the OTG engine. 1887550a7375SFelipe Balbi */ 1888550a7375SFelipe Balbi 1889550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1890550a7375SFelipe Balbi 1891550a7375SFelipe Balbi musb_hnp_stop(musb); 1892550a7375SFelipe Balbi 1893550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 0); 1894550a7375SFelipe Balbi 1895e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_UNDEFINED; 1896d5638fcfSFelipe Balbi musb_stop(musb); 18976e13c650SHeikki Krogerus otg_set_peripheral(musb->xceiv->otg, NULL); 1898550a7375SFelipe Balbi 1899550a7375SFelipe Balbi musb->is_active = 0; 1900e21de10cSGrazvydas Ignotas musb->gadget_driver = NULL; 1901550a7375SFelipe Balbi musb_platform_try_idle(musb, 0); 1902550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1903550a7375SFelipe Balbi 1904032ec49fSFelipe Balbi /* 1905032ec49fSFelipe Balbi * FIXME we need to be able to register another 1906550a7375SFelipe Balbi * gadget driver here and have everything work; 1907550a7375SFelipe Balbi * that currently misbehaves. 1908550a7375SFelipe Balbi */ 190963eed2b5SFelipe Balbi 19104e719183STony Lindgren /* Force check of devctl register for PM runtime */ 19112bff3916STony Lindgren schedule_delayed_work(&musb->irq_work, 0); 19124e719183STony Lindgren 19137099dbc5STony Lindgren pm_runtime_mark_last_busy(musb->controller); 19147099dbc5STony Lindgren pm_runtime_put_autosuspend(musb->controller); 19157acc6197SHema HK 191663eed2b5SFelipe Balbi return 0; 1917550a7375SFelipe Balbi } 1918550a7375SFelipe Balbi 1919550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1920550a7375SFelipe Balbi 1921550a7375SFelipe Balbi /* lifecycle operations called through plat_uds.c */ 1922550a7375SFelipe Balbi 1923550a7375SFelipe Balbi void musb_g_resume(struct musb *musb) 1924550a7375SFelipe Balbi { 1925550a7375SFelipe Balbi musb->is_suspended = 0; 1926e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 1927550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1928550a7375SFelipe Balbi break; 1929550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 1930550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1931550a7375SFelipe Balbi musb->is_active = 1; 1932550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->resume) { 1933550a7375SFelipe Balbi spin_unlock(&musb->lock); 1934550a7375SFelipe Balbi musb->gadget_driver->resume(&musb->g); 1935550a7375SFelipe Balbi spin_lock(&musb->lock); 1936550a7375SFelipe Balbi } 1937550a7375SFelipe Balbi break; 1938550a7375SFelipe Balbi default: 1939550a7375SFelipe Balbi WARNING("unhandled RESUME transition (%s)\n", 1940e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 1941550a7375SFelipe Balbi } 1942550a7375SFelipe Balbi } 1943550a7375SFelipe Balbi 1944550a7375SFelipe Balbi /* called when SOF packets stop for 3+ msec */ 1945550a7375SFelipe Balbi void musb_g_suspend(struct musb *musb) 1946550a7375SFelipe Balbi { 1947550a7375SFelipe Balbi u8 devctl; 1948550a7375SFelipe Balbi 1949550a7375SFelipe Balbi devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1950b99d3659SBin Liu musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl); 1951550a7375SFelipe Balbi 1952e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 1953550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1954550a7375SFelipe Balbi if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 1955e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 1956550a7375SFelipe Balbi break; 1957550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1958550a7375SFelipe Balbi musb->is_suspended = 1; 1959550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->suspend) { 1960550a7375SFelipe Balbi spin_unlock(&musb->lock); 1961550a7375SFelipe Balbi musb->gadget_driver->suspend(&musb->g); 1962550a7375SFelipe Balbi spin_lock(&musb->lock); 1963550a7375SFelipe Balbi } 1964550a7375SFelipe Balbi break; 1965550a7375SFelipe Balbi default: 1966550a7375SFelipe Balbi /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 1967550a7375SFelipe Balbi * A_PERIPHERAL may need care too 1968550a7375SFelipe Balbi */ 1969b99d3659SBin Liu WARNING("unhandled SUSPEND transition (%s)", 1970e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 1971550a7375SFelipe Balbi } 1972550a7375SFelipe Balbi } 1973550a7375SFelipe Balbi 1974550a7375SFelipe Balbi /* Called during SRP */ 1975550a7375SFelipe Balbi void musb_g_wakeup(struct musb *musb) 1976550a7375SFelipe Balbi { 1977550a7375SFelipe Balbi musb_gadget_wakeup(&musb->g); 1978550a7375SFelipe Balbi } 1979550a7375SFelipe Balbi 1980550a7375SFelipe Balbi /* called when VBUS drops below session threshold, and in other cases */ 1981550a7375SFelipe Balbi void musb_g_disconnect(struct musb *musb) 1982550a7375SFelipe Balbi { 1983550a7375SFelipe Balbi void __iomem *mregs = musb->mregs; 1984550a7375SFelipe Balbi u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 1985550a7375SFelipe Balbi 1986b99d3659SBin Liu musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl); 1987550a7375SFelipe Balbi 1988550a7375SFelipe Balbi /* clear HR */ 1989550a7375SFelipe Balbi musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 1990550a7375SFelipe Balbi 1991550a7375SFelipe Balbi /* don't draw vbus until new b-default session */ 1992550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 0); 1993550a7375SFelipe Balbi 1994550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 1995550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->disconnect) { 1996550a7375SFelipe Balbi spin_unlock(&musb->lock); 1997550a7375SFelipe Balbi musb->gadget_driver->disconnect(&musb->g); 1998550a7375SFelipe Balbi spin_lock(&musb->lock); 1999550a7375SFelipe Balbi } 2000550a7375SFelipe Balbi 2001e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 2002550a7375SFelipe Balbi default: 2003b99d3659SBin Liu musb_dbg(musb, "Unhandled disconnect %s, setting a_idle", 2004e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 2005e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2006ab983f2aSDavid Brownell MUSB_HST_MODE(musb); 2007550a7375SFelipe Balbi break; 2008550a7375SFelipe Balbi case OTG_STATE_A_PERIPHERAL: 2009e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; 2010ab983f2aSDavid Brownell MUSB_HST_MODE(musb); 2011550a7375SFelipe Balbi break; 2012550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 2013550a7375SFelipe Balbi case OTG_STATE_B_HOST: 2014550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 2015550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 2016e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_IDLE; 2017550a7375SFelipe Balbi break; 2018550a7375SFelipe Balbi case OTG_STATE_B_SRP_INIT: 2019550a7375SFelipe Balbi break; 2020550a7375SFelipe Balbi } 2021550a7375SFelipe Balbi 2022550a7375SFelipe Balbi musb->is_active = 0; 2023550a7375SFelipe Balbi } 2024550a7375SFelipe Balbi 2025550a7375SFelipe Balbi void musb_g_reset(struct musb *musb) 2026550a7375SFelipe Balbi __releases(musb->lock) 2027550a7375SFelipe Balbi __acquires(musb->lock) 2028550a7375SFelipe Balbi { 2029550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 2030550a7375SFelipe Balbi u8 devctl = musb_readb(mbase, MUSB_DEVCTL); 2031550a7375SFelipe Balbi u8 power; 2032550a7375SFelipe Balbi 2033b99d3659SBin Liu musb_dbg(musb, "<== %s driver '%s'", 2034550a7375SFelipe Balbi (devctl & MUSB_DEVCTL_BDEVICE) 2035550a7375SFelipe Balbi ? "B-Device" : "A-Device", 2036550a7375SFelipe Balbi musb->gadget_driver 2037550a7375SFelipe Balbi ? musb->gadget_driver->driver.name 2038550a7375SFelipe Balbi : NULL 2039550a7375SFelipe Balbi ); 2040550a7375SFelipe Balbi 20411189f7f6SFelipe Balbi /* report reset, if we didn't already (flushing EP state) */ 20421189f7f6SFelipe Balbi if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) { 20431189f7f6SFelipe Balbi spin_unlock(&musb->lock); 20441189f7f6SFelipe Balbi usb_gadget_udc_reset(&musb->g, musb->gadget_driver); 20451189f7f6SFelipe Balbi spin_lock(&musb->lock); 20461189f7f6SFelipe Balbi } 2047550a7375SFelipe Balbi 2048550a7375SFelipe Balbi /* clear HR */ 2049550a7375SFelipe Balbi else if (devctl & MUSB_DEVCTL_HR) 2050550a7375SFelipe Balbi musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 2051550a7375SFelipe Balbi 2052550a7375SFelipe Balbi 2053550a7375SFelipe Balbi /* what speed did we negotiate? */ 2054550a7375SFelipe Balbi power = musb_readb(mbase, MUSB_POWER); 2055550a7375SFelipe Balbi musb->g.speed = (power & MUSB_POWER_HSMODE) 2056550a7375SFelipe Balbi ? USB_SPEED_HIGH : USB_SPEED_FULL; 2057550a7375SFelipe Balbi 2058550a7375SFelipe Balbi /* start in USB_STATE_DEFAULT */ 2059550a7375SFelipe Balbi musb->is_active = 1; 2060550a7375SFelipe Balbi musb->is_suspended = 0; 2061550a7375SFelipe Balbi MUSB_DEV_MODE(musb); 2062550a7375SFelipe Balbi musb->address = 0; 2063550a7375SFelipe Balbi musb->ep0_state = MUSB_EP0_STAGE_SETUP; 2064550a7375SFelipe Balbi 2065550a7375SFelipe Balbi musb->may_wakeup = 0; 2066550a7375SFelipe Balbi musb->g.b_hnp_enable = 0; 2067550a7375SFelipe Balbi musb->g.a_alt_hnp_support = 0; 2068550a7375SFelipe Balbi musb->g.a_hnp_support = 0; 2069ca1023c8SRobert Baldyga musb->g.quirk_zlp_not_supp = 1; 2070550a7375SFelipe Balbi 2071550a7375SFelipe Balbi /* Normal reset, as B-Device; 2072550a7375SFelipe Balbi * or else after HNP, as A-Device 2073550a7375SFelipe Balbi */ 207423db9fd2SApelete Seketeli if (!musb->g.is_otg) { 207523db9fd2SApelete Seketeli /* USB device controllers that are not OTG compatible 207623db9fd2SApelete Seketeli * may not have DEVCTL register in silicon. 207723db9fd2SApelete Seketeli * In that case, do not rely on devctl for setting 207823db9fd2SApelete Seketeli * peripheral mode. 207923db9fd2SApelete Seketeli */ 2080e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 208123db9fd2SApelete Seketeli musb->g.is_a_peripheral = 0; 208223db9fd2SApelete Seketeli } else if (devctl & MUSB_DEVCTL_BDEVICE) { 2083e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 2084550a7375SFelipe Balbi musb->g.is_a_peripheral = 0; 2085032ec49fSFelipe Balbi } else { 2086e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL; 2087550a7375SFelipe Balbi musb->g.is_a_peripheral = 1; 2088032ec49fSFelipe Balbi } 2089550a7375SFelipe Balbi 2090550a7375SFelipe Balbi /* start with default limits on VBUS power draw */ 2091032ec49fSFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 8); 2092550a7375SFelipe Balbi } 2093