1550a7375SFelipe Balbi /* 2550a7375SFelipe Balbi * MUSB OTG driver peripheral support 3550a7375SFelipe Balbi * 4550a7375SFelipe Balbi * Copyright 2005 Mentor Graphics Corporation 5550a7375SFelipe Balbi * Copyright (C) 2005-2006 by Texas Instruments 6550a7375SFelipe Balbi * Copyright (C) 2006-2007 Nokia Corporation 7cea83241SSergei Shtylyov * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> 8550a7375SFelipe Balbi * 9550a7375SFelipe Balbi * This program is free software; you can redistribute it and/or 10550a7375SFelipe Balbi * modify it under the terms of the GNU General Public License 11550a7375SFelipe Balbi * version 2 as published by the Free Software Foundation. 12550a7375SFelipe Balbi * 13550a7375SFelipe Balbi * This program is distributed in the hope that it will be useful, but 14550a7375SFelipe Balbi * WITHOUT ANY WARRANTY; without even the implied warranty of 15550a7375SFelipe Balbi * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16550a7375SFelipe Balbi * General Public License for more details. 17550a7375SFelipe Balbi * 18550a7375SFelipe Balbi * You should have received a copy of the GNU General Public License 19550a7375SFelipe Balbi * along with this program; if not, write to the Free Software 20550a7375SFelipe Balbi * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 21550a7375SFelipe Balbi * 02110-1301 USA 22550a7375SFelipe Balbi * 23550a7375SFelipe Balbi * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 24550a7375SFelipe Balbi * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 25550a7375SFelipe Balbi * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"


/* ----------------------------------------------------------------------- */

/*
 * True only when DMA support is compiled in and this request's buffer is
 * currently mapped (map_state is MUSB_MAPPED or PRE_MAPPED).
 */
#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))

/* Maps the buffer to dma  */

/*
 * Prepare request->request.buf for DMA and record the result in
 * req->map_state:
 *   UN_MAPPED   - not mapped (no DMA capability, no channel, mapping
 *                 failed, or the engine rejected the request)
 *   MUSB_MAPPED - we called dma_map_single() here and own the mapping
 *   PRE_MAPPED  - caller supplied request.dma; we only sync for the device
 */
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dma_addr_t dma_addr;
		int ret;

		dma_addr = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		/*
		 * On mapping failure leave map_state as UN_MAPPED, so the
		 * caller falls back to PIO for this request.
		 */
		ret = dma_mapping_error(musb->controller, dma_addr);
		if (ret)
			return;

		request->request.dma = dma_addr;
		request->map_state = MUSB_MAPPED;
	} else {
		/* Buffer already mapped by the gadget driver: sync only. */
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from dma and maps it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;

	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		/* We created this mapping in map_dma_buffer(): tear it
		 * down and invalidate the handle. */
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		/* Gadget driver owns the mapping: hand the buffer back
		 * to the CPU but keep the mapping alive. */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	/* Preserve a status already set by the hardware-event path. */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	/*
	 * Mark the endpoint busy and drop the controller lock across the
	 * unmap and the gadget completion callback (see the
	 * __releases/__acquires annotations above); ep->busy is restored
	 * to its entry value afterwards.
	 */
	ep->busy = 1;
	spin_unlock(&musb->lock);

	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	/* Mark the endpoint busy for the duration of the teardown. */
	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			/* RX side: FLUSHFIFO is written twice here,
			 * mirroring the two-write TX sequence above. */
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		/* Abort any in-flight transfer, then give up the channel. */
		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	/* Complete every queued request with the caller's status. */
	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

/*
 * Largest amount txstate() may load into the TX FIFO in one write:
 * the full hardware FIFO size when bulk splitting is in effect,
 * otherwise one USB packet.
 */
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	/* PIO chunk size: bounded by FIFO/packet size and remaining data. */
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	/* Previous packet still in the FIFO: wait for the next IRQ. */
	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	/* Endpoint is stalled: do not feed data while halted. */
	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
			/* desired_mode 1 only when at least one full packet
			 * remains; otherwise fall back to mode 0. */
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
					    (musb_ep->hb_mult &&
					     can_bulk_split(musb,
							    musb_ep->type)))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
			       MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
						~MUSB_TXCSR_P_UNDERRUN) | csr);

			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);

			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 */
			/*
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				/* DMA refused the transfer: drop the channel
				 * and let the PIO path below take over. */
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: prequest->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		/* PIO: copy one chunk into the FIFO and mark it ready. */
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		/* Stall handshake was sent: clear the latched status
		 * (preserving write-1-to-clear bits) and stop here. */
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	 MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;
		bool	short_packet = false;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			/* DMA just completed: turn it off and credit the
			 * transferred length to the request. */
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length)
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
			short_packet = true;

		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
			(is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1)))))
			short_packet = true;

		if (short_packet) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after sometime. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		/* Kick the next (or continuing) transfer. */
		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	/* Shared-FIFO hardware endpoints keep their state on ep_in. */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We
shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	/* Endpoint is halted: do not accept data while stalling. */
	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE: CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		/* Actual byte count of the packet sitting in the FIFO. */
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */
		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
			if (!is_buffer_mapped(req))
				goto buffer_aint_mapped;

			if (musb_dma_inventra(musb)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the tranfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work. But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most these gadgets, end of is signified either by a short packet,
	 * or filling the last byte of the buffer. (Sending extra data in
	 * that last pckate should trigger an overflow fault.) But in mode 1,
	 * we don't get DMA completion interrupt for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
702550a7375SFelipe Balbi 7031018b4e4SMing Lei transfer_size = min(request->length - request->actual, 704f0443afdSSergei Shtylyov (unsigned)fifo_count); 705550a7375SFelipe Balbi musb_ep->dma->desired_mode = 0; 7060ae52d54SAnand Gadiyar } 707550a7375SFelipe Balbi 708550a7375SFelipe Balbi use_dma = c->channel_program( 709550a7375SFelipe Balbi channel, 710550a7375SFelipe Balbi musb_ep->packet_sz, 711550a7375SFelipe Balbi channel->desired_mode, 712550a7375SFelipe Balbi request->dma 713550a7375SFelipe Balbi + request->actual, 714550a7375SFelipe Balbi transfer_size); 715550a7375SFelipe Balbi 716550a7375SFelipe Balbi if (use_dma) 717550a7375SFelipe Balbi return; 718550a7375SFelipe Balbi } 71903840fadSFelipe Balbi 72003840fadSFelipe Balbi if ((musb_dma_ux500(musb)) && 721a48ff906SMian Yousaf Kaukab (request->actual < request->length)) { 722a48ff906SMian Yousaf Kaukab 723a48ff906SMian Yousaf Kaukab struct dma_controller *c; 724a48ff906SMian Yousaf Kaukab struct dma_channel *channel; 72537730eccSFelipe Balbi unsigned int transfer_size = 0; 726a48ff906SMian Yousaf Kaukab 727a48ff906SMian Yousaf Kaukab c = musb->dma_controller; 728a48ff906SMian Yousaf Kaukab channel = musb_ep->dma; 729a48ff906SMian Yousaf Kaukab 730a48ff906SMian Yousaf Kaukab /* In case first packet is short */ 731f0443afdSSergei Shtylyov if (fifo_count < musb_ep->packet_sz) 732f0443afdSSergei Shtylyov transfer_size = fifo_count; 733a48ff906SMian Yousaf Kaukab else if (request->short_not_ok) 73437730eccSFelipe Balbi transfer_size = min_t(unsigned int, 73537730eccSFelipe Balbi request->length - 736a48ff906SMian Yousaf Kaukab request->actual, 737a48ff906SMian Yousaf Kaukab channel->max_len); 738a48ff906SMian Yousaf Kaukab else 73937730eccSFelipe Balbi transfer_size = min_t(unsigned int, 74037730eccSFelipe Balbi request->length - 741a48ff906SMian Yousaf Kaukab request->actual, 742f0443afdSSergei Shtylyov (unsigned)fifo_count); 743a48ff906SMian Yousaf Kaukab 744a48ff906SMian Yousaf Kaukab csr &= 
~MUSB_RXCSR_DMAMODE; 745a48ff906SMian Yousaf Kaukab csr |= (MUSB_RXCSR_DMAENAB | 746a48ff906SMian Yousaf Kaukab MUSB_RXCSR_AUTOCLEAR); 747a48ff906SMian Yousaf Kaukab 748a48ff906SMian Yousaf Kaukab musb_writew(epio, MUSB_RXCSR, csr); 749a48ff906SMian Yousaf Kaukab 750a48ff906SMian Yousaf Kaukab if (transfer_size <= musb_ep->packet_sz) { 751a48ff906SMian Yousaf Kaukab musb_ep->dma->desired_mode = 0; 752a48ff906SMian Yousaf Kaukab } else { 753a48ff906SMian Yousaf Kaukab musb_ep->dma->desired_mode = 1; 754a48ff906SMian Yousaf Kaukab /* Mode must be set after DMAENAB */ 755a48ff906SMian Yousaf Kaukab csr |= MUSB_RXCSR_DMAMODE; 756a48ff906SMian Yousaf Kaukab musb_writew(epio, MUSB_RXCSR, csr); 757a48ff906SMian Yousaf Kaukab } 758a48ff906SMian Yousaf Kaukab 759a48ff906SMian Yousaf Kaukab if (c->channel_program(channel, 760a48ff906SMian Yousaf Kaukab musb_ep->packet_sz, 761a48ff906SMian Yousaf Kaukab channel->desired_mode, 762a48ff906SMian Yousaf Kaukab request->dma 763a48ff906SMian Yousaf Kaukab + request->actual, 764a48ff906SMian Yousaf Kaukab transfer_size)) 765a48ff906SMian Yousaf Kaukab 766a48ff906SMian Yousaf Kaukab return; 767a48ff906SMian Yousaf Kaukab } 768550a7375SFelipe Balbi 769f0443afdSSergei Shtylyov len = request->length - request->actual; 7705c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", 771550a7375SFelipe Balbi musb_ep->end_point.name, 772f0443afdSSergei Shtylyov fifo_count, len, 773550a7375SFelipe Balbi musb_ep->packet_sz); 774550a7375SFelipe Balbi 775c2c96321SFelipe Balbi fifo_count = min_t(unsigned, len, fifo_count); 776550a7375SFelipe Balbi 77703840fadSFelipe Balbi if (tusb_dma_omap(musb)) { 778550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 779550a7375SFelipe Balbi struct dma_channel *channel = musb_ep->dma; 780550a7375SFelipe Balbi u32 dma_addr = request->dma + request->actual; 781550a7375SFelipe Balbi int ret; 782550a7375SFelipe Balbi 783550a7375SFelipe Balbi ret = 
c->channel_program(channel, 784550a7375SFelipe Balbi musb_ep->packet_sz, 785550a7375SFelipe Balbi channel->desired_mode, 786550a7375SFelipe Balbi dma_addr, 787550a7375SFelipe Balbi fifo_count); 788550a7375SFelipe Balbi if (ret) 789550a7375SFelipe Balbi return; 790550a7375SFelipe Balbi } 79103840fadSFelipe Balbi 79292d2711fSHema Kalliguddi /* 79392d2711fSHema Kalliguddi * Unmap the dma buffer back to cpu if dma channel 79492d2711fSHema Kalliguddi * programming fails. This buffer is mapped if the 79592d2711fSHema Kalliguddi * channel allocation is successful 79692d2711fSHema Kalliguddi */ 79792d2711fSHema Kalliguddi unmap_dma_buffer(req, musb); 79892d2711fSHema Kalliguddi 799e75df371SMing Lei /* 800e75df371SMing Lei * Clear DMAENAB and AUTOCLEAR for the 80192d2711fSHema Kalliguddi * PIO mode transfer 80292d2711fSHema Kalliguddi */ 803e75df371SMing Lei csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); 80492d2711fSHema Kalliguddi musb_writew(epio, MUSB_RXCSR, csr); 805550a7375SFelipe Balbi 80603840fadSFelipe Balbi buffer_aint_mapped: 807550a7375SFelipe Balbi musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) 808550a7375SFelipe Balbi (request->buf + request->actual)); 809550a7375SFelipe Balbi request->actual += fifo_count; 810550a7375SFelipe Balbi 811550a7375SFelipe Balbi /* REVISIT if we left anything in the fifo, flush 812550a7375SFelipe Balbi * it and report -EOVERFLOW 813550a7375SFelipe Balbi */ 814550a7375SFelipe Balbi 815550a7375SFelipe Balbi /* ack the read! 
*/ 816550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS; 817550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_RXPKTRDY; 818550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 819550a7375SFelipe Balbi } 820550a7375SFelipe Balbi } 821550a7375SFelipe Balbi 822550a7375SFelipe Balbi /* reach the end or short packet detected */ 823f0443afdSSergei Shtylyov if (request->actual == request->length || 824f0443afdSSergei Shtylyov fifo_count < musb_ep->packet_sz) 825550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 826550a7375SFelipe Balbi } 827550a7375SFelipe Balbi 828550a7375SFelipe Balbi /* 829550a7375SFelipe Balbi * Data ready for a request; called from IRQ 830550a7375SFelipe Balbi */ 831550a7375SFelipe Balbi void musb_g_rx(struct musb *musb, u8 epnum) 832550a7375SFelipe Balbi { 833550a7375SFelipe Balbi u16 csr; 834ad1adb89SFelipe Balbi struct musb_request *req; 835550a7375SFelipe Balbi struct usb_request *request; 836550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 837bd2e74d6SMing Lei struct musb_ep *musb_ep; 838550a7375SFelipe Balbi void __iomem *epio = musb->endpoints[epnum].regs; 839550a7375SFelipe Balbi struct dma_channel *dma; 840bd2e74d6SMing Lei struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 841bd2e74d6SMing Lei 842bd2e74d6SMing Lei if (hw_ep->is_shared_fifo) 843bd2e74d6SMing Lei musb_ep = &hw_ep->ep_in; 844bd2e74d6SMing Lei else 845bd2e74d6SMing Lei musb_ep = &hw_ep->ep_out; 846550a7375SFelipe Balbi 847550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 848550a7375SFelipe Balbi 849ad1adb89SFelipe Balbi req = next_request(musb_ep); 850ad1adb89SFelipe Balbi if (!req) 8510abdc36fSMaulik Mankad return; 852550a7375SFelipe Balbi 853ad1adb89SFelipe Balbi request = &req->request; 854ad1adb89SFelipe Balbi 855550a7375SFelipe Balbi csr = musb_readw(epio, MUSB_RXCSR); 856550a7375SFelipe Balbi dma = is_dma_capable() ? 
musb_ep->dma : NULL; 857550a7375SFelipe Balbi 8585c8a86e1SFelipe Balbi dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, 859550a7375SFelipe Balbi csr, dma ? " (dma)" : "", request); 860550a7375SFelipe Balbi 861550a7375SFelipe Balbi if (csr & MUSB_RXCSR_P_SENTSTALL) { 862550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_WZC_BITS; 863550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_P_SENTSTALL; 864550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 865cea83241SSergei Shtylyov return; 866550a7375SFelipe Balbi } 867550a7375SFelipe Balbi 868550a7375SFelipe Balbi if (csr & MUSB_RXCSR_P_OVERRUN) { 869550a7375SFelipe Balbi /* csr |= MUSB_RXCSR_P_WZC_BITS; */ 870550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_P_OVERRUN; 871550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 872550a7375SFelipe Balbi 8735c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); 87443467868SSergei Shtylyov if (request->status == -EINPROGRESS) 875550a7375SFelipe Balbi request->status = -EOVERFLOW; 876550a7375SFelipe Balbi } 877550a7375SFelipe Balbi if (csr & MUSB_RXCSR_INCOMPRX) { 878550a7375SFelipe Balbi /* REVISIT not necessarily an error */ 8795c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); 880550a7375SFelipe Balbi } 881550a7375SFelipe Balbi 882550a7375SFelipe Balbi if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 883550a7375SFelipe Balbi /* "should not happen"; likely RXPKTRDY pending for DMA */ 8845c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s busy, csr %04x\n", 885550a7375SFelipe Balbi musb_ep->end_point.name, csr); 886cea83241SSergei Shtylyov return; 887550a7375SFelipe Balbi } 888550a7375SFelipe Balbi 889550a7375SFelipe Balbi if (dma && (csr & MUSB_RXCSR_DMAENAB)) { 890550a7375SFelipe Balbi csr &= ~(MUSB_RXCSR_AUTOCLEAR 891550a7375SFelipe Balbi | MUSB_RXCSR_DMAENAB 892550a7375SFelipe Balbi | MUSB_RXCSR_DMAMODE); 893550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, 
894550a7375SFelipe Balbi MUSB_RXCSR_P_WZC_BITS | csr); 895550a7375SFelipe Balbi 896550a7375SFelipe Balbi request->actual += musb_ep->dma->actual_len; 897550a7375SFelipe Balbi 8985c8a86e1SFelipe Balbi dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", 899550a7375SFelipe Balbi epnum, csr, 900550a7375SFelipe Balbi musb_readw(epio, MUSB_RXCSR), 901550a7375SFelipe Balbi musb_ep->dma->actual_len, request); 902550a7375SFelipe Balbi 903a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 904a48ff906SMian Yousaf Kaukab defined(CONFIG_USB_UX500_DMA) 905550a7375SFelipe Balbi /* Autoclear doesn't clear RxPktRdy for short packets */ 9069001d80dSMing Lei if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) 907550a7375SFelipe Balbi || (dma->actual_len 908550a7375SFelipe Balbi & (musb_ep->packet_sz - 1))) { 909550a7375SFelipe Balbi /* ack the read! */ 910550a7375SFelipe Balbi csr &= ~MUSB_RXCSR_RXPKTRDY; 911550a7375SFelipe Balbi musb_writew(epio, MUSB_RXCSR, csr); 912550a7375SFelipe Balbi } 913550a7375SFelipe Balbi 914550a7375SFelipe Balbi /* incomplete, and not short? wait for next IN packet */ 915550a7375SFelipe Balbi if ((request->actual < request->length) 916550a7375SFelipe Balbi && (musb_ep->dma->actual_len 9179001d80dSMing Lei == musb_ep->packet_sz)) { 9189001d80dSMing Lei /* In double buffer case, continue to unload fifo if 9199001d80dSMing Lei * there is Rx packet in FIFO. 
9209001d80dSMing Lei **/ 9219001d80dSMing Lei csr = musb_readw(epio, MUSB_RXCSR); 9229001d80dSMing Lei if ((csr & MUSB_RXCSR_RXPKTRDY) && 9239001d80dSMing Lei hw_ep->rx_double_buffered) 9249001d80dSMing Lei goto exit; 925cea83241SSergei Shtylyov return; 9269001d80dSMing Lei } 927550a7375SFelipe Balbi #endif 928550a7375SFelipe Balbi musb_g_giveback(musb_ep, request, 0); 92939287076SSupriya Karanth /* 93039287076SSupriya Karanth * In the giveback function the MUSB lock is 93139287076SSupriya Karanth * released and acquired after sometime. During 93239287076SSupriya Karanth * this time period the INDEX register could get 93339287076SSupriya Karanth * changed by the gadget_queue function especially 93439287076SSupriya Karanth * on SMP systems. Reselect the INDEX to be sure 93539287076SSupriya Karanth * we are reading/modifying the right registers 93639287076SSupriya Karanth */ 93739287076SSupriya Karanth musb_ep_select(mbase, epnum); 938550a7375SFelipe Balbi 939ad1adb89SFelipe Balbi req = next_request(musb_ep); 940ad1adb89SFelipe Balbi if (!req) 941cea83241SSergei Shtylyov return; 942550a7375SFelipe Balbi } 943a48ff906SMian Yousaf Kaukab #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 944a48ff906SMian Yousaf Kaukab defined(CONFIG_USB_UX500_DMA) 9459001d80dSMing Lei exit: 946bb324b08SAjay Kumar Gupta #endif 94743467868SSergei Shtylyov /* Analyze request */ 948ad1adb89SFelipe Balbi rxstate(musb, req); 949550a7375SFelipe Balbi } 950550a7375SFelipe Balbi 951550a7375SFelipe Balbi /* ------------------------------------------------------------ */ 952550a7375SFelipe Balbi 953550a7375SFelipe Balbi static int musb_gadget_enable(struct usb_ep *ep, 954550a7375SFelipe Balbi const struct usb_endpoint_descriptor *desc) 955550a7375SFelipe Balbi { 956550a7375SFelipe Balbi unsigned long flags; 957550a7375SFelipe Balbi struct musb_ep *musb_ep; 958550a7375SFelipe Balbi struct musb_hw_ep *hw_ep; 959550a7375SFelipe Balbi void __iomem *regs; 960550a7375SFelipe 
Balbi struct musb *musb; 961550a7375SFelipe Balbi void __iomem *mbase; 962550a7375SFelipe Balbi u8 epnum; 963550a7375SFelipe Balbi u16 csr; 964550a7375SFelipe Balbi unsigned tmp; 965550a7375SFelipe Balbi int status = -EINVAL; 966550a7375SFelipe Balbi 967550a7375SFelipe Balbi if (!ep || !desc) 968550a7375SFelipe Balbi return -EINVAL; 969550a7375SFelipe Balbi 970550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 971550a7375SFelipe Balbi hw_ep = musb_ep->hw_ep; 972550a7375SFelipe Balbi regs = hw_ep->regs; 973550a7375SFelipe Balbi musb = musb_ep->musb; 974550a7375SFelipe Balbi mbase = musb->mregs; 975550a7375SFelipe Balbi epnum = musb_ep->current_epnum; 976550a7375SFelipe Balbi 977550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 978550a7375SFelipe Balbi 979550a7375SFelipe Balbi if (musb_ep->desc) { 980550a7375SFelipe Balbi status = -EBUSY; 981550a7375SFelipe Balbi goto fail; 982550a7375SFelipe Balbi } 98396bcd090SJulia Lawall musb_ep->type = usb_endpoint_type(desc); 984550a7375SFelipe Balbi 985550a7375SFelipe Balbi /* check direction and (later) maxpacket size against endpoint */ 98696bcd090SJulia Lawall if (usb_endpoint_num(desc) != epnum) 987550a7375SFelipe Balbi goto fail; 988550a7375SFelipe Balbi 989550a7375SFelipe Balbi /* REVISIT this rules out high bandwidth periodic transfers */ 99029cc8897SKuninori Morimoto tmp = usb_endpoint_maxp(desc); 991f11d893dSMing Lei if (tmp & ~0x07ff) { 992f11d893dSMing Lei int ok; 993f11d893dSMing Lei 994f11d893dSMing Lei if (usb_endpoint_dir_in(desc)) 995f11d893dSMing Lei ok = musb->hb_iso_tx; 996f11d893dSMing Lei else 997f11d893dSMing Lei ok = musb->hb_iso_rx; 998f11d893dSMing Lei 999f11d893dSMing Lei if (!ok) { 10005c8a86e1SFelipe Balbi dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); 1001550a7375SFelipe Balbi goto fail; 1002f11d893dSMing Lei } 1003f11d893dSMing Lei musb_ep->hb_mult = (tmp >> 11) & 3; 1004f11d893dSMing Lei } else { 1005f11d893dSMing Lei musb_ep->hb_mult = 0; 1006f11d893dSMing Lei } 
1007f11d893dSMing Lei 1008f11d893dSMing Lei musb_ep->packet_sz = tmp & 0x7ff; 1009f11d893dSMing Lei tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); 1010550a7375SFelipe Balbi 1011550a7375SFelipe Balbi /* enable the interrupts for the endpoint, set the endpoint 1012550a7375SFelipe Balbi * packet size (or fail), set the mode, clear the fifo 1013550a7375SFelipe Balbi */ 1014550a7375SFelipe Balbi musb_ep_select(mbase, epnum); 101596bcd090SJulia Lawall if (usb_endpoint_dir_in(desc)) { 1016550a7375SFelipe Balbi 1017550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) 1018550a7375SFelipe Balbi musb_ep->is_in = 1; 1019550a7375SFelipe Balbi if (!musb_ep->is_in) 1020550a7375SFelipe Balbi goto fail; 1021f11d893dSMing Lei 1022f11d893dSMing Lei if (tmp > hw_ep->max_packet_sz_tx) { 10235c8a86e1SFelipe Balbi dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); 1024550a7375SFelipe Balbi goto fail; 1025f11d893dSMing Lei } 1026550a7375SFelipe Balbi 1027b18d26f6SSebastian Andrzej Siewior musb->intrtxe |= (1 << epnum); 1028b18d26f6SSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); 1029550a7375SFelipe Balbi 1030550a7375SFelipe Balbi /* REVISIT if can_bulk_split(), use by updating "tmp"; 1031550a7375SFelipe Balbi * likewise high bandwidth periodic tx 1032550a7375SFelipe Balbi */ 10339f445cb2SCliff Cai /* Set TXMAXP with the FIFO size of the endpoint 103431c9909bSMing Lei * to disable double buffering mode. 
10359f445cb2SCliff Cai */ 1036bb3a2ef2Ssupriya karanth if (musb->double_buffer_not_ok) { 103706624818SFelipe Balbi musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); 1038bb3a2ef2Ssupriya karanth } else { 1039bb3a2ef2Ssupriya karanth if (can_bulk_split(musb, musb_ep->type)) 1040bb3a2ef2Ssupriya karanth musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / 1041bb3a2ef2Ssupriya karanth musb_ep->packet_sz) - 1; 104206624818SFelipe Balbi musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz 104306624818SFelipe Balbi | (musb_ep->hb_mult << 11)); 1044bb3a2ef2Ssupriya karanth } 1045550a7375SFelipe Balbi 1046550a7375SFelipe Balbi csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 1047550a7375SFelipe Balbi if (musb_readw(regs, MUSB_TXCSR) 1048550a7375SFelipe Balbi & MUSB_TXCSR_FIFONOTEMPTY) 1049550a7375SFelipe Balbi csr |= MUSB_TXCSR_FLUSHFIFO; 1050550a7375SFelipe Balbi if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1051550a7375SFelipe Balbi csr |= MUSB_TXCSR_P_ISO; 1052550a7375SFelipe Balbi 1053550a7375SFelipe Balbi /* set twice in case of double buffering */ 1054550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1055550a7375SFelipe Balbi /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... 
*/ 1056550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1057550a7375SFelipe Balbi 1058550a7375SFelipe Balbi } else { 1059550a7375SFelipe Balbi 1060550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) 1061550a7375SFelipe Balbi musb_ep->is_in = 0; 1062550a7375SFelipe Balbi if (musb_ep->is_in) 1063550a7375SFelipe Balbi goto fail; 1064f11d893dSMing Lei 1065f11d893dSMing Lei if (tmp > hw_ep->max_packet_sz_rx) { 10665c8a86e1SFelipe Balbi dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); 1067550a7375SFelipe Balbi goto fail; 1068f11d893dSMing Lei } 1069550a7375SFelipe Balbi 1070af5ec14dSSebastian Andrzej Siewior musb->intrrxe |= (1 << epnum); 1071af5ec14dSSebastian Andrzej Siewior musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); 1072550a7375SFelipe Balbi 1073550a7375SFelipe Balbi /* REVISIT if can_bulk_combine() use by updating "tmp" 1074550a7375SFelipe Balbi * likewise high bandwidth periodic rx 1075550a7375SFelipe Balbi */ 10769f445cb2SCliff Cai /* Set RXMAXP with the FIFO size of the endpoint 10779f445cb2SCliff Cai * to disable double buffering mode. 
10789f445cb2SCliff Cai */ 107906624818SFelipe Balbi if (musb->double_buffer_not_ok) 108006624818SFelipe Balbi musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); 108106624818SFelipe Balbi else 108206624818SFelipe Balbi musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz 108306624818SFelipe Balbi | (musb_ep->hb_mult << 11)); 1084550a7375SFelipe Balbi 1085550a7375SFelipe Balbi /* force shared fifo to OUT-only mode */ 1086550a7375SFelipe Balbi if (hw_ep->is_shared_fifo) { 1087550a7375SFelipe Balbi csr = musb_readw(regs, MUSB_TXCSR); 1088550a7375SFelipe Balbi csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); 1089550a7375SFelipe Balbi musb_writew(regs, MUSB_TXCSR, csr); 1090550a7375SFelipe Balbi } 1091550a7375SFelipe Balbi 1092550a7375SFelipe Balbi csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; 1093550a7375SFelipe Balbi if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1094550a7375SFelipe Balbi csr |= MUSB_RXCSR_P_ISO; 1095550a7375SFelipe Balbi else if (musb_ep->type == USB_ENDPOINT_XFER_INT) 1096550a7375SFelipe Balbi csr |= MUSB_RXCSR_DISNYET; 1097550a7375SFelipe Balbi 1098550a7375SFelipe Balbi /* set twice in case of double buffering */ 1099550a7375SFelipe Balbi musb_writew(regs, MUSB_RXCSR, csr); 1100550a7375SFelipe Balbi musb_writew(regs, MUSB_RXCSR, csr); 1101550a7375SFelipe Balbi } 1102550a7375SFelipe Balbi 1103550a7375SFelipe Balbi /* NOTE: all the I/O code _should_ work fine without DMA, in case 1104550a7375SFelipe Balbi * for some reason you run out of channels here. 
1105550a7375SFelipe Balbi */ 1106550a7375SFelipe Balbi if (is_dma_capable() && musb->dma_controller) { 1107550a7375SFelipe Balbi struct dma_controller *c = musb->dma_controller; 1108550a7375SFelipe Balbi 1109550a7375SFelipe Balbi musb_ep->dma = c->channel_alloc(c, hw_ep, 1110550a7375SFelipe Balbi (desc->bEndpointAddress & USB_DIR_IN)); 1111550a7375SFelipe Balbi } else 1112550a7375SFelipe Balbi musb_ep->dma = NULL; 1113550a7375SFelipe Balbi 1114550a7375SFelipe Balbi musb_ep->desc = desc; 1115550a7375SFelipe Balbi musb_ep->busy = 0; 111647e97605SSergei Shtylyov musb_ep->wedged = 0; 1117550a7375SFelipe Balbi status = 0; 1118550a7375SFelipe Balbi 1119550a7375SFelipe Balbi pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", 1120550a7375SFelipe Balbi musb_driver_name, musb_ep->end_point.name, 1121550a7375SFelipe Balbi ({ char *s; switch (musb_ep->type) { 1122550a7375SFelipe Balbi case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; 1123550a7375SFelipe Balbi case USB_ENDPOINT_XFER_INT: s = "int"; break; 1124550a7375SFelipe Balbi default: s = "iso"; break; 11252b84f92bSJoe Perches } s; }), 1126550a7375SFelipe Balbi musb_ep->is_in ? "IN" : "OUT", 1127550a7375SFelipe Balbi musb_ep->dma ? "dma, " : "", 1128550a7375SFelipe Balbi musb_ep->packet_sz); 1129550a7375SFelipe Balbi 1130550a7375SFelipe Balbi schedule_work(&musb->irq_work); 1131550a7375SFelipe Balbi 1132550a7375SFelipe Balbi fail: 1133550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1134550a7375SFelipe Balbi return status; 1135550a7375SFelipe Balbi } 1136550a7375SFelipe Balbi 1137550a7375SFelipe Balbi /* 1138550a7375SFelipe Balbi * Disable an endpoint flushing all requests queued. 
1139550a7375SFelipe Balbi */ 1140550a7375SFelipe Balbi static int musb_gadget_disable(struct usb_ep *ep) 1141550a7375SFelipe Balbi { 1142550a7375SFelipe Balbi unsigned long flags; 1143550a7375SFelipe Balbi struct musb *musb; 1144550a7375SFelipe Balbi u8 epnum; 1145550a7375SFelipe Balbi struct musb_ep *musb_ep; 1146550a7375SFelipe Balbi void __iomem *epio; 1147550a7375SFelipe Balbi int status = 0; 1148550a7375SFelipe Balbi 1149550a7375SFelipe Balbi musb_ep = to_musb_ep(ep); 1150550a7375SFelipe Balbi musb = musb_ep->musb; 1151550a7375SFelipe Balbi epnum = musb_ep->current_epnum; 1152550a7375SFelipe Balbi epio = musb->endpoints[epnum].regs; 1153550a7375SFelipe Balbi 1154550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1155550a7375SFelipe Balbi musb_ep_select(musb->mregs, epnum); 1156550a7375SFelipe Balbi 1157550a7375SFelipe Balbi /* zero the endpoint sizes */ 1158550a7375SFelipe Balbi if (musb_ep->is_in) { 1159b18d26f6SSebastian Andrzej Siewior musb->intrtxe &= ~(1 << epnum); 1160b18d26f6SSebastian Andrzej Siewior musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe); 1161550a7375SFelipe Balbi musb_writew(epio, MUSB_TXMAXP, 0); 1162550a7375SFelipe Balbi } else { 1163af5ec14dSSebastian Andrzej Siewior musb->intrrxe &= ~(1 << epnum); 1164af5ec14dSSebastian Andrzej Siewior musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe); 1165550a7375SFelipe Balbi musb_writew(epio, MUSB_RXMAXP, 0); 1166550a7375SFelipe Balbi } 1167550a7375SFelipe Balbi 1168550a7375SFelipe Balbi musb_ep->desc = NULL; 116908f75bf1SGrazvydas Ignotas musb_ep->end_point.desc = NULL; 1170550a7375SFelipe Balbi 1171550a7375SFelipe Balbi /* abort all pending DMA and requests */ 1172550a7375SFelipe Balbi nuke(musb_ep, -ESHUTDOWN); 1173550a7375SFelipe Balbi 1174550a7375SFelipe Balbi schedule_work(&musb->irq_work); 1175550a7375SFelipe Balbi 1176550a7375SFelipe Balbi spin_unlock_irqrestore(&(musb->lock), flags); 1177550a7375SFelipe Balbi 11785c8a86e1SFelipe Balbi dev_dbg(musb->controller, "%s\n", 
musb_ep->end_point.name); 1179550a7375SFelipe Balbi 1180550a7375SFelipe Balbi return status; 1181550a7375SFelipe Balbi } 1182550a7375SFelipe Balbi 1183550a7375SFelipe Balbi /* 1184550a7375SFelipe Balbi * Allocate a request for an endpoint. 1185550a7375SFelipe Balbi * Reused by ep0 code. 1186550a7375SFelipe Balbi */ 1187550a7375SFelipe Balbi struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) 1188550a7375SFelipe Balbi { 1189550a7375SFelipe Balbi struct musb_ep *musb_ep = to_musb_ep(ep); 11905c8a86e1SFelipe Balbi struct musb *musb = musb_ep->musb; 1191550a7375SFelipe Balbi struct musb_request *request = NULL; 1192550a7375SFelipe Balbi 1193550a7375SFelipe Balbi request = kzalloc(sizeof *request, gfp_flags); 11940607f862SFelipe Balbi if (!request) { 11955c8a86e1SFelipe Balbi dev_dbg(musb->controller, "not enough memory\n"); 11960607f862SFelipe Balbi return NULL; 11970607f862SFelipe Balbi } 11980607f862SFelipe Balbi 1199550a7375SFelipe Balbi request->request.dma = DMA_ADDR_INVALID; 1200550a7375SFelipe Balbi request->epnum = musb_ep->current_epnum; 1201550a7375SFelipe Balbi request->ep = musb_ep; 1202550a7375SFelipe Balbi 1203550a7375SFelipe Balbi return &request->request; 1204550a7375SFelipe Balbi } 1205550a7375SFelipe Balbi 1206550a7375SFelipe Balbi /* 1207550a7375SFelipe Balbi * Free a request 1208550a7375SFelipe Balbi * Reused by ep0 code. 
 */
/*
 * Free a request previously allocated by musb_alloc_request().
 * Standard usb_ep_ops.free_request hook; @req must come from
 * musb_alloc_request() so the containing musb_request is recovered.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

/* NOTE(review): no user of "buffers"/free_record is visible in this chunk;
 * presumably leftover deferred-free bookkeeping — confirm before removing.
 */
static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Start the next I/O (TX or RX state machine) for @req on its hw endpoint.
 *
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

/*
 * usb_ep_ops.queue: validate the request, map it for DMA (when capable),
 * append it to the endpoint's software queue, and kick the hardware if it
 * landed at the head of an idle endpoint.
 *
 * Returns 0 on success, -EINVAL on bad/mismatched arguments, -ENODATA when
 * no buffer is supplied, -ESHUTDOWN when the endpoint is disabled.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	/* request must have been allocated against this endpoint */
	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	/* DMA-map outside the lock; may be a no-op on PIO-only builds */
	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

/*
 * usb_ep_ops.dequeue: remove @request from the endpoint queue.  If the
 * hardware already owns it, try to abort the DMA channel first; the request
 * is then completed with -ECONNRESET via musb_g_giveback().
 */
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	/* confirm the request is actually on this endpoint's queue */
	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
 * data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	/* NOTE(review): musb_ep is derived from ep before this NULL check;
	 * safe only if to_musb_ep() is pure pointer arithmetic — confirm.
	 */
	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	/* halting an isochronous endpoint is not allowed */
	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		/* refuse to stall while a transfer is in flight */
		if (request) {
			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
			    ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		/* clearing halt also un-wedges the endpoint */
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		/* W1C bits must be preserved as written-one-to-clear */
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		dev_dbg(musb->controller, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Sets the halt feature with the clear requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	/* wedged stays set until set_halt(ep, 0) clears it */
	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}

/*
 * usb_ep_ops.fifo_status: for an enabled OUT endpoint, report the number
 * of bytes pending in the RX FIFO (MUSB_RXCOUNT); -EINVAL otherwise.
 */
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

/*
 * usb_ep_ops.fifo_flush: discard any data sitting in the endpoint FIFO.
 * TX endpoint interrupts are masked for the duration so a flush-triggered
 * interrupt cannot race the CSR manipulation.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes controller
			 * to interrupt current FIFO loading, but not flushing
			 * the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			/* double write: flush both packets of a double-buffered FIFO */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

/* per-endpoint operations exposed to the gadget layer (ep1..epN) */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

/* usb_gadget_ops.get_frame: current USB frame number from MUSB_FRAME */
static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

/*
 * usb_gadget_ops.wakeup: signal remote wakeup (B_PERIPHERAL, when the host
 * enabled remote wakeup and we are suspended) or start an SRP session
 * (B_IDLE).  Returns 0 on success, -EINVAL otherwise.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		/* bounded busy-wait for the session bit to latch ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then for the SRP pulse to complete */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* otg_start_srp() may sleep; drop the spinlock around it */
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		goto done;
	}

	status = 0;

	/* drive resume signaling on the bus */
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/* usb_gadget_ops.set_selfpowered: record the flag; nothing touches hardware */
static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	gadget->is_selfpowered = !!is_selfpowered;
	return 0;
}

/* Connect or disconnect the D+ pullup (soft-connect bit in MUSB_POWER). */
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

/* usb_gadget_ops.vbus_draw: delegate current budgeting to the transceiver */
static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return usb_phy_set_power(musb->xceiv, mA);
}

/*
 * usb_gadget_ops.pullup: change the soft-connect state, skipping the
 * hardware write when the requested state matches the cached one.
 */
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* keep the controller powered while we poke MUSB_POWER */
	pm_runtime_get_sync(musb->controller);

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_put(musb->controller);

	return 0;
}

#ifdef CONFIG_BLACKFIN
/*
 * Blackfin-specific endpoint matcher: those cores need fixed endpoint
 * assignments per transfer type (ep5in/ep6out for iso+bulk, ep1in/ep2out
 * for interrupt).  Other platforms use the gadget core's default matching.
 */
static struct usb_ep *musb_match_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct usb_ep *ep = NULL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_BULK:
		if (usb_endpoint_dir_in(desc))
			ep = gadget_find_ep_by_name(g, "ep5in");
		else
			ep = gadget_find_ep_by_name(g, "ep6out");
		break;
	case USB_ENDPOINT_XFER_INT:
		if (usb_endpoint_dir_in(desc))
			ep = gadget_find_ep_by_name(g, "ep1in");
		else
			ep = gadget_find_ep_by_name(g, "ep2out");
		break;
	default:
		break;
	}

	/* the chosen endpoint must still satisfy the descriptor */
	if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
		return ep;

	return NULL;
}
#else
#define musb_match_ep NULL
#endif

static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g);

/* controller-level operations exposed to the gadget/UDC core */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
	.match_ep		= musb_match_ep,
};

/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */

/*
 * Initialize one struct musb_ep and hook it into the gadget's endpoint
 * list.  ep0 gets the control ops and becomes musb->g.ep0; all others get
 * musb_ep_ops and advertise iso/bulk/int capability.  Direction caps are
 * both-ways for ep0 and shared-FIFO endpoints, one-way otherwise.
 */
static void
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	/* "ep0", or "ep3in"/"ep3out"; shared-FIFO eps get no direction suffix */
	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
		ep->end_point.caps.type_control = true;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
		else
			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
		ep->end_point.caps.type_iso = true;
		ep->end_point.caps.type_bulk = true;
		ep->end_point.caps.type_int = true;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}

	if (!epnum || hw_ep->is_shared_fifo) {
		ep->end_point.caps.dir_in = true;
		ep->end_point.caps.dir_out = true;
	} else if (is_in)
		ep->end_point.caps.dir_in = true;
	else
		ep->end_point.caps.dir_out = true;
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			/* one bidirectional musb_ep per shared-FIFO hw_ep */
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			/* separate IN/OUT eps, only where a FIFO is sized */
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* start in peripheral (B-device) mode */
	MUSB_DEV_MODE(musb);
	musb->xceiv->otg->default_a = 0;
	musb->xceiv->otg->state = OTG_STATE_B_IDLE;

	/* this "gadget" abstracts/virtualizes the controller */
	musb->g.name = musb_driver_name;
#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
	musb->g.is_otg = 1;
#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
	musb->g.is_otg = 0;
#endif

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}

/* Undo musb_gadget_setup(); no-op when the port runs host-only. */
void musb_gadget_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PORT_MODE_HOST)
		return;
	usb_del_gadget_udc(&musb->g);
}

/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb		*musb = gadget_to_musb(g);
	struct usb_otg		*otg = musb->xceiv->otg;
	unsigned long		flags;
	int			retval = 0;

	/* this controller is high-speed; refuse slower-only drivers */
	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}

	pm_runtime_get_sync(musb->controller);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_start(musb);

	/* REVISIT:  funcall to other code, which also
	 * handles power budgeting ... this way also
	 * ensures HdrcStart is indirectly called.
	 */
	if (musb->xceiv->last_event == USB_EVENT_ID)
		musb_platform_set_vbus(musb, 1);

	/* balance the get_sync above unless a cable event keeps us active */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);

	return 0;

err:
	return retval;
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
1930550a7375SFelipe Balbi * 1931550a7375SFelipe Balbi * @param driver the gadget driver to unregister 1932550a7375SFelipe Balbi */ 193322835b80SFelipe Balbi static int musb_gadget_stop(struct usb_gadget *g) 1934550a7375SFelipe Balbi { 1935e71eb392SSebastian Andrzej Siewior struct musb *musb = gadget_to_musb(g); 193663eed2b5SFelipe Balbi unsigned long flags; 1937550a7375SFelipe Balbi 19387acc6197SHema HK if (musb->xceiv->last_event == USB_EVENT_NONE) 19397acc6197SHema HK pm_runtime_get_sync(musb->controller); 19407acc6197SHema HK 194163eed2b5SFelipe Balbi /* 194263eed2b5SFelipe Balbi * REVISIT always use otg_set_peripheral() here too; 1943550a7375SFelipe Balbi * this needs to shut down the OTG engine. 1944550a7375SFelipe Balbi */ 1945550a7375SFelipe Balbi 1946550a7375SFelipe Balbi spin_lock_irqsave(&musb->lock, flags); 1947550a7375SFelipe Balbi 1948550a7375SFelipe Balbi musb_hnp_stop(musb); 1949550a7375SFelipe Balbi 1950550a7375SFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 0); 1951550a7375SFelipe Balbi 1952e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_UNDEFINED; 1953d5638fcfSFelipe Balbi musb_stop(musb); 19546e13c650SHeikki Krogerus otg_set_peripheral(musb->xceiv->otg, NULL); 1955550a7375SFelipe Balbi 1956550a7375SFelipe Balbi musb->is_active = 0; 1957e21de10cSGrazvydas Ignotas musb->gadget_driver = NULL; 1958550a7375SFelipe Balbi musb_platform_try_idle(musb, 0); 1959550a7375SFelipe Balbi spin_unlock_irqrestore(&musb->lock, flags); 1960550a7375SFelipe Balbi 1961032ec49fSFelipe Balbi /* 1962032ec49fSFelipe Balbi * FIXME we need to be able to register another 1963550a7375SFelipe Balbi * gadget driver here and have everything work; 1964550a7375SFelipe Balbi * that currently misbehaves. 
1965550a7375SFelipe Balbi */ 196663eed2b5SFelipe Balbi 19677acc6197SHema HK pm_runtime_put(musb->controller); 19687acc6197SHema HK 196963eed2b5SFelipe Balbi return 0; 1970550a7375SFelipe Balbi } 1971550a7375SFelipe Balbi 1972550a7375SFelipe Balbi /* ----------------------------------------------------------------------- */ 1973550a7375SFelipe Balbi 1974550a7375SFelipe Balbi /* lifecycle operations called through plat_uds.c */ 1975550a7375SFelipe Balbi 1976550a7375SFelipe Balbi void musb_g_resume(struct musb *musb) 1977550a7375SFelipe Balbi { 1978550a7375SFelipe Balbi musb->is_suspended = 0; 1979e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 1980550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 1981550a7375SFelipe Balbi break; 1982550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 1983550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 1984550a7375SFelipe Balbi musb->is_active = 1; 1985550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->resume) { 1986550a7375SFelipe Balbi spin_unlock(&musb->lock); 1987550a7375SFelipe Balbi musb->gadget_driver->resume(&musb->g); 1988550a7375SFelipe Balbi spin_lock(&musb->lock); 1989550a7375SFelipe Balbi } 1990550a7375SFelipe Balbi break; 1991550a7375SFelipe Balbi default: 1992550a7375SFelipe Balbi WARNING("unhandled RESUME transition (%s)\n", 1993e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 1994550a7375SFelipe Balbi } 1995550a7375SFelipe Balbi } 1996550a7375SFelipe Balbi 1997550a7375SFelipe Balbi /* called when SOF packets stop for 3+ msec */ 1998550a7375SFelipe Balbi void musb_g_suspend(struct musb *musb) 1999550a7375SFelipe Balbi { 2000550a7375SFelipe Balbi u8 devctl; 2001550a7375SFelipe Balbi 2002550a7375SFelipe Balbi devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 20035c8a86e1SFelipe Balbi dev_dbg(musb->controller, "devctl %02x\n", devctl); 2004550a7375SFelipe Balbi 2005e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 2006550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 
2007550a7375SFelipe Balbi if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 2008e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 2009550a7375SFelipe Balbi break; 2010550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 2011550a7375SFelipe Balbi musb->is_suspended = 1; 2012550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->suspend) { 2013550a7375SFelipe Balbi spin_unlock(&musb->lock); 2014550a7375SFelipe Balbi musb->gadget_driver->suspend(&musb->g); 2015550a7375SFelipe Balbi spin_lock(&musb->lock); 2016550a7375SFelipe Balbi } 2017550a7375SFelipe Balbi break; 2018550a7375SFelipe Balbi default: 2019550a7375SFelipe Balbi /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 2020550a7375SFelipe Balbi * A_PERIPHERAL may need care too 2021550a7375SFelipe Balbi */ 2022550a7375SFelipe Balbi WARNING("unhandled SUSPEND transition (%s)\n", 2023e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 2024550a7375SFelipe Balbi } 2025550a7375SFelipe Balbi } 2026550a7375SFelipe Balbi 2027550a7375SFelipe Balbi /* Called during SRP */ 2028550a7375SFelipe Balbi void musb_g_wakeup(struct musb *musb) 2029550a7375SFelipe Balbi { 2030550a7375SFelipe Balbi musb_gadget_wakeup(&musb->g); 2031550a7375SFelipe Balbi } 2032550a7375SFelipe Balbi 2033550a7375SFelipe Balbi /* called when VBUS drops below session threshold, and in other cases */ 2034550a7375SFelipe Balbi void musb_g_disconnect(struct musb *musb) 2035550a7375SFelipe Balbi { 2036550a7375SFelipe Balbi void __iomem *mregs = musb->mregs; 2037550a7375SFelipe Balbi u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 2038550a7375SFelipe Balbi 20395c8a86e1SFelipe Balbi dev_dbg(musb->controller, "devctl %02x\n", devctl); 2040550a7375SFelipe Balbi 2041550a7375SFelipe Balbi /* clear HR */ 2042550a7375SFelipe Balbi musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 2043550a7375SFelipe Balbi 2044550a7375SFelipe Balbi /* don't draw vbus until new b-default session */ 2045550a7375SFelipe Balbi 
(void) musb_gadget_vbus_draw(&musb->g, 0); 2046550a7375SFelipe Balbi 2047550a7375SFelipe Balbi musb->g.speed = USB_SPEED_UNKNOWN; 2048550a7375SFelipe Balbi if (musb->gadget_driver && musb->gadget_driver->disconnect) { 2049550a7375SFelipe Balbi spin_unlock(&musb->lock); 2050550a7375SFelipe Balbi musb->gadget_driver->disconnect(&musb->g); 2051550a7375SFelipe Balbi spin_lock(&musb->lock); 2052550a7375SFelipe Balbi } 2053550a7375SFelipe Balbi 2054e47d9254SAntoine Tenart switch (musb->xceiv->otg->state) { 2055550a7375SFelipe Balbi default: 20565c8a86e1SFelipe Balbi dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", 2057e47d9254SAntoine Tenart usb_otg_state_string(musb->xceiv->otg->state)); 2058e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2059ab983f2aSDavid Brownell MUSB_HST_MODE(musb); 2060550a7375SFelipe Balbi break; 2061550a7375SFelipe Balbi case OTG_STATE_A_PERIPHERAL: 2062e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; 2063ab983f2aSDavid Brownell MUSB_HST_MODE(musb); 2064550a7375SFelipe Balbi break; 2065550a7375SFelipe Balbi case OTG_STATE_B_WAIT_ACON: 2066550a7375SFelipe Balbi case OTG_STATE_B_HOST: 2067550a7375SFelipe Balbi case OTG_STATE_B_PERIPHERAL: 2068550a7375SFelipe Balbi case OTG_STATE_B_IDLE: 2069e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_IDLE; 2070550a7375SFelipe Balbi break; 2071550a7375SFelipe Balbi case OTG_STATE_B_SRP_INIT: 2072550a7375SFelipe Balbi break; 2073550a7375SFelipe Balbi } 2074550a7375SFelipe Balbi 2075550a7375SFelipe Balbi musb->is_active = 0; 2076550a7375SFelipe Balbi } 2077550a7375SFelipe Balbi 2078550a7375SFelipe Balbi void musb_g_reset(struct musb *musb) 2079550a7375SFelipe Balbi __releases(musb->lock) 2080550a7375SFelipe Balbi __acquires(musb->lock) 2081550a7375SFelipe Balbi { 2082550a7375SFelipe Balbi void __iomem *mbase = musb->mregs; 2083550a7375SFelipe Balbi u8 devctl = musb_readb(mbase, MUSB_DEVCTL); 2084550a7375SFelipe Balbi u8 power; 
2085550a7375SFelipe Balbi 2086515ba29cSSebastian Andrzej Siewior dev_dbg(musb->controller, "<== %s driver '%s'\n", 2087550a7375SFelipe Balbi (devctl & MUSB_DEVCTL_BDEVICE) 2088550a7375SFelipe Balbi ? "B-Device" : "A-Device", 2089550a7375SFelipe Balbi musb->gadget_driver 2090550a7375SFelipe Balbi ? musb->gadget_driver->driver.name 2091550a7375SFelipe Balbi : NULL 2092550a7375SFelipe Balbi ); 2093550a7375SFelipe Balbi 20941189f7f6SFelipe Balbi /* report reset, if we didn't already (flushing EP state) */ 20951189f7f6SFelipe Balbi if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) { 20961189f7f6SFelipe Balbi spin_unlock(&musb->lock); 20971189f7f6SFelipe Balbi usb_gadget_udc_reset(&musb->g, musb->gadget_driver); 20981189f7f6SFelipe Balbi spin_lock(&musb->lock); 20991189f7f6SFelipe Balbi } 2100550a7375SFelipe Balbi 2101550a7375SFelipe Balbi /* clear HR */ 2102550a7375SFelipe Balbi else if (devctl & MUSB_DEVCTL_HR) 2103550a7375SFelipe Balbi musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 2104550a7375SFelipe Balbi 2105550a7375SFelipe Balbi 2106550a7375SFelipe Balbi /* what speed did we negotiate? */ 2107550a7375SFelipe Balbi power = musb_readb(mbase, MUSB_POWER); 2108550a7375SFelipe Balbi musb->g.speed = (power & MUSB_POWER_HSMODE) 2109550a7375SFelipe Balbi ? 
USB_SPEED_HIGH : USB_SPEED_FULL; 2110550a7375SFelipe Balbi 2111550a7375SFelipe Balbi /* start in USB_STATE_DEFAULT */ 2112550a7375SFelipe Balbi musb->is_active = 1; 2113550a7375SFelipe Balbi musb->is_suspended = 0; 2114550a7375SFelipe Balbi MUSB_DEV_MODE(musb); 2115550a7375SFelipe Balbi musb->address = 0; 2116550a7375SFelipe Balbi musb->ep0_state = MUSB_EP0_STAGE_SETUP; 2117550a7375SFelipe Balbi 2118550a7375SFelipe Balbi musb->may_wakeup = 0; 2119550a7375SFelipe Balbi musb->g.b_hnp_enable = 0; 2120550a7375SFelipe Balbi musb->g.a_alt_hnp_support = 0; 2121550a7375SFelipe Balbi musb->g.a_hnp_support = 0; 2122ca1023c8SRobert Baldyga musb->g.quirk_zlp_not_supp = 1; 2123550a7375SFelipe Balbi 2124550a7375SFelipe Balbi /* Normal reset, as B-Device; 2125550a7375SFelipe Balbi * or else after HNP, as A-Device 2126550a7375SFelipe Balbi */ 212723db9fd2SApelete Seketeli if (!musb->g.is_otg) { 212823db9fd2SApelete Seketeli /* USB device controllers that are not OTG compatible 212923db9fd2SApelete Seketeli * may not have DEVCTL register in silicon. 213023db9fd2SApelete Seketeli * In that case, do not rely on devctl for setting 213123db9fd2SApelete Seketeli * peripheral mode. 213223db9fd2SApelete Seketeli */ 2133e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 213423db9fd2SApelete Seketeli musb->g.is_a_peripheral = 0; 213523db9fd2SApelete Seketeli } else if (devctl & MUSB_DEVCTL_BDEVICE) { 2136e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 2137550a7375SFelipe Balbi musb->g.is_a_peripheral = 0; 2138032ec49fSFelipe Balbi } else { 2139e47d9254SAntoine Tenart musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL; 2140550a7375SFelipe Balbi musb->g.is_a_peripheral = 1; 2141032ec49fSFelipe Balbi } 2142550a7375SFelipe Balbi 2143550a7375SFelipe Balbi /* start with default limits on VBUS power draw */ 2144032ec49fSFelipe Balbi (void) musb_gadget_vbus_draw(&musb->g, 8); 2145550a7375SFelipe Balbi } 2146