// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2021 Aspeed Technology Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/slab.h>

#define AST_UDC_NUM_ENDPOINTS		(1 + 4)
#define AST_UDC_EP0_MAX_PACKET		64	/* EP0's max packet size */
#define AST_UDC_EPn_MAX_PACKET		1024	/* Generic EPs max packet size */
#define AST_UDC_DESCS_COUNT		256	/* Use 256 stages descriptor mode (32/256) */
#define AST_UDC_DESC_MODE		1	/* Single/Multiple Stage(s) Descriptor Mode */

#define AST_UDC_EP_DMA_SIZE		(AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT)

/*****************************
 *                           *
 * UDC register definitions  *
 *                           *
 *****************************/

#define AST_UDC_FUNC_CTRL		0x00	/* Root Function Control & Status Register */
#define AST_UDC_CONFIG			0x04	/* Root Configuration Setting Register */
#define AST_UDC_IER			0x08	/* Interrupt Control Register */
#define AST_UDC_ISR			0x0C	/* Interrupt Status Register */
#define AST_UDC_EP_ACK_IER		0x10	/* Programmable ep Pool ACK Interrupt Enable Reg */
#define AST_UDC_EP_NAK_IER		0x14	/* Programmable ep Pool NAK Interrupt Enable Reg */
#define AST_UDC_EP_ACK_ISR		0x18	/* Programmable ep Pool ACK Interrupt Status Reg */
#define AST_UDC_EP_NAK_ISR		0x1C	/* Programmable ep Pool NAK Interrupt Status Reg */
#define AST_UDC_DEV_RESET		0x20	/* Device Controller Soft Reset Enable Register */
#define AST_UDC_STS			0x24	/* USB Status Register */
#define AST_VHUB_EP_DATA		0x28	/* Programmable ep Pool Data Toggle Value Set */
#define AST_VHUB_ISO_TX_FAIL		0x2C	/* Isochronous Transaction Fail Accumulator */
#define AST_UDC_EP0_CTRL		0x30	/* Endpoint 0 Control/Status Register */
#define AST_UDC_EP0_DATA_BUFF		0x34	/* Base Address of ep0 IN/OUT Data Buffer Reg */
#define AST_UDC_SETUP0			0x80	/* Root Device Setup Data Buffer0 */
#define AST_UDC_SETUP1			0x84	/* Root Device Setup Data Buffer1 */


/* Main control reg */
#define USB_PHY_CLK_EN			BIT(31)
#define USB_FIFO_DYN_PWRD_EN		BIT(19)
#define USB_EP_LONG_DESC		BIT(18)
#define USB_BIST_TEST_PASS		BIT(13)
#define USB_BIST_TURN_ON		BIT(12)
#define USB_PHY_RESET_DIS		BIT(11)
#define USB_TEST_MODE(x)		((x) << 8)
#define USB_FORCE_TIMER_HS		BIT(7)
#define USB_FORCE_HS			BIT(6)
#define USB_REMOTE_WAKEUP_12MS		BIT(5)
#define USB_REMOTE_WAKEUP_EN		BIT(4)
#define USB_AUTO_REMOTE_WAKEUP_EN	BIT(3)
#define USB_STOP_CLK_IN_SUPEND		BIT(2)
#define USB_UPSTREAM_FS			BIT(1)
#define USB_UPSTREAM_EN			BIT(0)

/* Main config reg */
#define UDC_CFG_SET_ADDR(x)		((x) & 0x3f)
#define UDC_CFG_ADDR_MASK		(0x3f)

/* Interrupt ctrl & status reg */
#define UDC_IRQ_EP_POOL_NAK		BIT(17)
#define UDC_IRQ_EP_POOL_ACK_STALL	BIT(16)
#define UDC_IRQ_BUS_RESUME		BIT(8)
#define UDC_IRQ_BUS_SUSPEND		BIT(7)
#define UDC_IRQ_BUS_RESET		BIT(6)
#define UDC_IRQ_EP0_IN_DATA_NAK		BIT(4)
#define UDC_IRQ_EP0_IN_ACK_STALL	BIT(3)
#define UDC_IRQ_EP0_OUT_NAK		BIT(2)
#define UDC_IRQ_EP0_OUT_ACK_STALL	BIT(1)
#define UDC_IRQ_EP0_SETUP		BIT(0)
#define UDC_IRQ_ACK_ALL			(0x1ff)

/* EP isr reg */
#define USB_EP3_ISR			BIT(3)
#define USB_EP2_ISR			BIT(2)
#define USB_EP1_ISR			BIT(1)
#define USB_EP0_ISR			BIT(0)
#define UDC_IRQ_EP_ACK_ALL		(0xf)

/* Soft reset reg */
#define ROOT_UDC_SOFT_RESET		BIT(0)

/* USB status reg */
#define UDC_STS_HIGHSPEED		BIT(27)

/* Programmable EP data toggle */
#define EP_TOGGLE_SET_EPNUM(x)		((x) & 0x3)

/* EP0 ctrl reg */
#define EP0_GET_RX_LEN(x)		((x >> 16) & 0x7f)
#define EP0_TX_LEN(x)			((x & 0x7f) << 8)
#define EP0_RX_BUFF_RDY			BIT(2)
#define EP0_TX_BUFF_RDY			BIT(1)
#define EP0_STALL			BIT(0)

/*************************************
 *                                   *
 * per-endpoint register definitions *
 *                                   *
 *************************************/

#define AST_UDC_EP_CONFIG		0x00	/* Endpoint Configuration Register */
#define AST_UDC_EP_DMA_CTRL		0x04	/* DMA Descriptor List Control/Status Register */
#define AST_UDC_EP_DMA_BUFF		0x08	/* DMA Descriptor/Buffer Base Address */
#define AST_UDC_EP_DMA_STS		0x0C	/* DMA Descriptor List R/W Pointer and Status */

#define AST_UDC_EP_BASE			0x200
#define AST_UDC_EP_OFFSET		0x10

/* EP config reg */
#define EP_SET_MAX_PKT(x)		((x & 0x3ff) << 16)
#define EP_DATA_FETCH_CTRL(x)		((x & 0x3) << 14)
#define EP_AUTO_DATA_DISABLE		(0x1 << 13)
#define EP_SET_EP_STALL			(0x1 << 12)
#define EP_SET_EP_NUM(x)		((x & 0xf) << 8)
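/* Transfer type is encoded in bits [6:5]; see the EP_TYPE_* values below */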
#define EP_SET_TYPE_MASK(x)		((x) << 5)
#define EP_TYPE_BULK			(0x1)
#define EP_TYPE_INT			(0x2)
#define EP_TYPE_ISO			(0x3)
#define EP_DIR_OUT			(0x1 << 4)
#define EP_ALLOCATED_MASK		(0x7 << 1)
#define EP_ENABLE			BIT(0)

/* EP DMA ctrl reg */
#define EP_DMA_CTRL_GET_PROC_STS(x)	((x >> 4) & 0xf)
#define EP_DMA_CTRL_STS_RX_IDLE		0x0
#define EP_DMA_CTRL_STS_TX_IDLE		0x8
#define EP_DMA_CTRL_IN_LONG_MODE	(0x1 << 3)
#define EP_DMA_CTRL_RESET		(0x1 << 2)
#define EP_DMA_SINGLE_STAGE		(0x1 << 1)
#define EP_DMA_DESC_MODE		(0x1 << 0)

/* EP DMA status reg */
#define EP_DMA_SET_TX_SIZE(x)		((x & 0x7ff) << 16)
#define EP_DMA_GET_TX_SIZE(x)		(((x) >> 16) & 0x7ff)
#define EP_DMA_GET_RPTR(x)		(((x) >> 8) & 0xff)
#define EP_DMA_GET_WPTR(x)		((x) & 0xff)
#define EP_DMA_SINGLE_KICK		(1 << 0)	/* WPTR = 1 for single mode */

/* EP desc reg */
#define AST_EP_DMA_DESC_INTR_ENABLE	BIT(31)
#define AST_EP_DMA_DESC_PID_DATA0	(0 << 14)
#define AST_EP_DMA_DESC_PID_DATA2	BIT(14)
#define AST_EP_DMA_DESC_PID_DATA1	(2 << 14)
#define AST_EP_DMA_DESC_PID_MDATA	(3 << 14)
#define EP_DESC1_IN_LEN(x)		((x) & 0x1fff)
#define AST_EP_DMA_DESC_MAX_LEN		(7680)	/* Max packet length for transmit in 1 desc */

struct ast_udc_request {
	struct usb_request	req;
	struct list_head	queue;
	unsigned		mapped:1;
	unsigned int		actual_dma_length;
	u32			saved_dma_wptr;
};

#define to_ast_req(__req) container_of(__req, struct ast_udc_request, req)

struct ast_dma_desc {
	u32	des_0;
	u32	des_1;
};

struct ast_udc_ep {
	struct usb_ep			ep;

	/* Request queue */
	struct list_head		queue;

	struct ast_udc_dev		*udc;
	void __iomem			*ep_reg;
	void				*epn_buf;
	dma_addr_t			epn_buf_dma;
	const struct usb_endpoint_descriptor	*desc;

	/* DMA Descriptors */
	struct ast_dma_desc		*descs;
	dma_addr_t			descs_dma;
	u32				descs_wptr;
	u32				chunk_max;

	bool				dir_in:1;
	unsigned			stopped:1;
	bool				desc_mode:1;
};

#define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep)

struct ast_udc_dev {
	struct platform_device		*pdev;
	void __iomem			*reg;
	int				irq;
	spinlock_t			lock;
	struct clk			*clk;
	struct work_struct		wake_work;

	/* EP0 DMA buffers allocated in one chunk */
	void				*ep0_buf;
	dma_addr_t			ep0_buf_dma;
	struct ast_udc_ep		ep[AST_UDC_NUM_ENDPOINTS];

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	void __iomem			*creq;
	enum usb_device_state		suspended_from;
	int				desc_mode;

	/* Force full speed only */
	bool				force_usb1:1;
	unsigned			is_control_tx:1;
	bool				wakeup_en:1;
};

#define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget)

static const char * const ast_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4"
};

#ifdef AST_UDC_DEBUG_ALL
#define AST_UDC_DEBUG
#define AST_SETUP_DEBUG
#define AST_EP_DEBUG
#define AST_ISR_DEBUG
#endif

#ifdef AST_SETUP_DEBUG
#define SETUP_DBG(u, fmt, ...)	\
	dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define SETUP_DBG(u, fmt, ...)
#endif

#ifdef AST_EP_DEBUG
#define EP_DBG(e, fmt, ...)	\
	dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__,	\
		(e)->ep.name, ##__VA_ARGS__)
#else
#define EP_DBG(ep, fmt, ...)	((void)(ep))
#endif

#ifdef AST_UDC_DEBUG
#define UDC_DBG(u, fmt, ...)	\
	dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define UDC_DBG(u, fmt, ...)
#endif

#ifdef AST_ISR_DEBUG
#define ISR_DBG(u, fmt, ...)	\
	dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define ISR_DBG(u, fmt, ...)
#endif

/*-------------------------------------------------------------------------*/
#define ast_udc_read(udc, offset) \
	readl((udc)->reg + (offset))
#define ast_udc_write(udc, val, offset) \
	writel((val), (udc)->reg + (offset))

#define ast_ep_read(ep, reg) \
	readl((ep)->ep_reg + (reg))
#define ast_ep_write(ep, val, reg) \
	writel((val), (ep)->ep_reg + (reg))

/*-------------------------------------------------------------------------*/

static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req,
			 int status)
{
	struct ast_udc_dev *udc = ep->udc;

	EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n",
	       req, req->req.actual, req->req.length,
	       (u32)req->req.buf, ep->dir_in);

	list_del(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		EP_DBG(ep, "done req:%p, status:%d\n", req, status);

	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}

static void ast_udc_nuke(struct ast_udc_ep *ep, int status)
{
	int count = 0;

	while (!list_empty(&ep->queue)) {
		struct ast_udc_request *req;

		req = list_entry(ep->queue.next, struct ast_udc_request,
				 queue);
		ast_udc_done(ep, req, status);
		count++;
	}

	if (count)
		EP_DBG(ep, "Nuked %d request(s)\n", count);
}

/*
 * Stop activity on all endpoints of the given device controller.
 *
 * Every endpoint is stopped and any transfer requests still pending
 * on it are terminated.
 */
static void ast_udc_stop_activity(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep;
	int i;

	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->stopped = 1;
		ast_udc_nuke(ep, -ESHUTDOWN);
	}
}

static int ast_udc_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	u16 maxpacket = usb_endpoint_maxp(desc);
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	u8 epnum = usb_endpoint_num(desc);
	unsigned long flags;
	u32 ep_conf = 0;
	u8 dir_in;
	u8 type;

	if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EP_DBG(ep, "Failed, invalid EP enable param\n");
		return -EINVAL;
	}

	if (!udc->driver) {
		EP_DBG(ep, "bogus device state\n");
		return -ESHUTDOWN;
	}

	EP_DBG(ep, "maxpacket:0x%x\n", maxpacket);

	spin_lock_irqsave(&udc->lock, flags);

	ep->desc = desc;
	ep->stopped = 0;
	ep->ep.maxpacket = maxpacket;
	ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN;

	if (maxpacket < AST_UDC_EPn_MAX_PACKET)
		ep_conf = EP_SET_MAX_PKT(maxpacket);

	ep_conf |= EP_SET_EP_NUM(epnum);

	type = usb_endpoint_type(desc);
	dir_in = usb_endpoint_dir_in(desc);
	ep->dir_in = dir_in;
	if (!ep->dir_in)
		ep_conf |= EP_DIR_OUT;

	EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in);
	switch (type) {
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO);
		break;

	case USB_ENDPOINT_XFER_BULK:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK);
		break;

	case USB_ENDPOINT_XFER_INT:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT);
		break;
	}

	ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in;
	if (ep->desc_mode) {
		ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
		ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF);

		/* Enable Long Descriptor Mode */
		ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE,
			     AST_UDC_EP_DMA_CTRL);

		ep->descs_wptr = 0;

	} else {
		ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
	}

	/* Cleanup data toggle just in case */
	ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA);

	/* Enable EP */
	ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG);

	EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG));

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int ast_udc_ep_disable(struct usb_ep *_ep)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	ast_udc_nuke(ep, -ESHUTDOWN);
	ast_ep_write(ep, 0, AST_UDC_EP_CONFIG);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep,
						    gfp_t gfp_flags)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_request *req;

	req = kzalloc(sizeof(struct ast_udc_request), gfp_flags);
	if (!req) {
		EP_DBG(ep, "request allocation failed\n");
		return NULL;
	}

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void ast_udc_ep_free_request(struct usb_ep *_ep,
				    struct usb_request *_req)
{
	struct ast_udc_request *req = to_ast_req(_req);

	kfree(req);
}

static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
				    u16 tx_len, struct ast_udc_request *req)
{
	struct ast_udc_dev *udc = ep->udc;
	struct device *dev = &udc->pdev->dev;
	u32 offset, chunk;
	int count, last;

	if (!ep->descs) {
		dev_warn(dev, "%s: Empty DMA descs list failure\n",
			 ep->ep.name);
		return -EINVAL;
	}

	chunk = tx_len;
	offset = count = last = 0;

	EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req,
	       "wptr", ep->descs_wptr, "dma_buf", dma_buf,
	       "tx_len", tx_len);

	/* Create Descriptor Lists */
	while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) {

		ep->descs[ep->descs_wptr].des_0 = dma_buf + offset;

		if (chunk <= ep->chunk_max) {
			ep->descs[ep->descs_wptr].des_1 = chunk;
			last = 1;
		} else {
			ep->descs[ep->descs_wptr].des_1 = ep->chunk_max;
			chunk -= ep->chunk_max;
		}

		EP_DBG(ep, "descs[%d]: 0x%x 0x%x, last:%d\n",
		       ep->descs_wptr,
		       ep->descs[ep->descs_wptr].des_0,
		       ep->descs[ep->descs_wptr].des_1,
		       last);

		if (count == 0)
			req->saved_dma_wptr = ep->descs_wptr;

		ep->descs_wptr++;
		count++;

		if (ep->descs_wptr >= AST_UDC_DESCS_COUNT)
			ep->descs_wptr = 0;

		offset = ep->chunk_max * count;
	}

	return 0;
}

static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
{
	u32 tx_len;
	u32 last;

	last = req->req.length - req->req.actual;
	tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;

	EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n",
	       req, tx_len, ep->dir_in);

	ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF);

	/* Start DMA */
	ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len), AST_UDC_EP_DMA_STS);
	ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len) | EP_DMA_SINGLE_KICK,
		     AST_UDC_EP_DMA_STS);
}

static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep,
				  struct ast_udc_request *req)
{
	u32 descs_max_size;
	u32 tx_len;
	u32 last;

	descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT;

	last = req->req.length - req->req.actual;
	tx_len = last > descs_max_size ? descs_max_size : last;

	EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n",
	       req, "tx_len", tx_len, "dir_in", ep->dir_in,
	       "dma", req->req.dma + req->req.actual,
	       req->req.actual, req->req.length,
	       "descs_max_size", descs_max_size);

	if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual,
				      tx_len, req))
		req->actual_dma_length += tx_len;

	/* make sure the CPU is done with everything before triggering DMA */
	mb();

	ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS);

	EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n",
	       ep->descs_wptr,
	       ast_ep_read(ep, AST_UDC_EP_DMA_STS),
	       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
}

static void ast_udc_ep0_queue(struct ast_udc_ep *ep,
			      struct ast_udc_request *req)
{
	struct ast_udc_dev *udc = ep->udc;
	u32 tx_len;
	u32 last;

	last = req->req.length - req->req.actual;
	tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;

	ast_udc_write(udc, req->req.dma + req->req.actual,
		      AST_UDC_EP0_DATA_BUFF);

	if (ep->dir_in) {
		/* IN requests, send data */
		SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n",
			  "buf", (u32)req->req.buf,
			  "dma", req->req.dma + req->req.actual,
			  "tx_len", tx_len,
			  req->req.actual, req->req.length,
			  "dir_in", ep->dir_in);

		req->req.actual += tx_len;
		ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL);
		ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY,
			      AST_UDC_EP0_CTRL);

	} else {
		/* OUT requests, receive data */
		SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n",
			  "buf", (u32)req->req.buf,
			  "dma", req->req.dma + req->req.actual,
			  "len", req->req.actual, req->req.length,
			  "dir_in", ep->dir_in);

		if (!req->req.length) {
			/* 0 len request, send tx as completion */
			ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
			ep->dir_in = 0x1;
		} else
			ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
	}
}

static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
			    gfp_t gfp_flags)
{
	struct ast_udc_request *req = to_ast_req(_req);
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	struct device *dev = &udc->pdev->dev;
	unsigned long flags;
	int rc;

	if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) {
		dev_warn(dev, "Invalid EP request !\n");
		return -EINVAL;
	}

	if (ep->stopped) {
		dev_warn(dev, "%s is already stopped !\n", _ep->name);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&udc->lock, flags);

	list_add_tail(&req->queue, &ep->queue);

	req->req.actual = 0;
	req->req.status = -EINPROGRESS;
	req->actual_dma_length = 0;

	rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in);
	if (rc) {
		EP_DBG(ep, "Request mapping failure %d\n", rc);
		dev_warn(dev, "Request mapping failure %d\n", rc);
		goto end;
	}

	EP_DBG(ep, "enqueue req @%p\n", req);
	EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n",
	       _req->length, _req->dma, _req->zero, ep->dir_in);

	/* EP0 request enqueue */
	if (ep->ep.desc == NULL) {
		if ((req->req.dma % 4) != 0) {
			dev_warn(dev, "EP0 req dma alignment error\n");
			rc = -ESHUTDOWN;
			goto end;
		}

		ast_udc_ep0_queue(ep, req);
		goto end;
	}

	/* EPn request enqueue */
	if (list_is_singular(&ep->queue)) {
		if (ep->desc_mode)
			ast_udc_epn_kick_desc(ep, req);
		else
			ast_udc_epn_kick(ep, req);
	}

end:
	spin_unlock_irqrestore(&udc->lock, flags);

	return rc;
}

static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	struct ast_udc_request *req;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req) {
			list_del_init(&req->queue);
			ast_udc_done(ep, req, -ESHUTDOWN);
			_req->status = -ECONNRESET;
			break;
		}
	}

	/* dequeue request not found */
	if (&req->req != _req)
		rc = -EINVAL;

	spin_unlock_irqrestore(&udc->lock, flags);

	return rc;
}

static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	unsigned long flags;
	int epnum;
	u32 ctrl;

	EP_DBG(ep, "val:%d\n", value);

	spin_lock_irqsave(&udc->lock, flags);

	epnum = usb_endpoint_num(ep->desc);

	/* EP0 */
	if (epnum == 0) {
		ctrl = ast_udc_read(udc, AST_UDC_EP0_CTRL);
		if (value)
			ctrl |= EP0_STALL;
		else
			ctrl &= ~EP0_STALL;

		ast_udc_write(udc, ctrl, AST_UDC_EP0_CTRL);

	} else {
		/* EPn */
		ctrl = ast_ep_read(ep, AST_UDC_EP_CONFIG);
		if (value)
			ctrl |= EP_SET_EP_STALL;
		else
			ctrl &= ~EP_SET_EP_STALL;

		ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG);

		/* only EPn is stopped here and waits for the halt to be cleared */
		ep->stopped = value ? 1 : 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops ast_udc_ep_ops = {
	.enable		= ast_udc_ep_enable,
	.disable	= ast_udc_ep_disable,
	.alloc_request	= ast_udc_ep_alloc_request,
	.free_request	= ast_udc_ep_free_request,
	.queue		= ast_udc_ep_queue,
	.dequeue	= ast_udc_ep_dequeue,
	.set_halt	= ast_udc_ep_set_halt,
	/* there's only imprecise fifo status reporting */
};

static void ast_udc_ep0_rx(struct ast_udc_dev *udc)
{
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
}

static void ast_udc_ep0_tx(struct ast_udc_dev *udc)
{
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}

static void ast_udc_ep0_out(struct ast_udc_dev *udc)
{
	struct device *dev = &udc->pdev->dev;
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;
	u16 rx_len;

	if (list_empty(&ep->queue))
		return;

	req = list_entry(ep->queue.next, struct ast_udc_request, queue);

	rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL));
	req->req.actual += rx_len;

	SETUP_DBG(udc, "req %p (%d/%d)\n", req,
		  req->req.actual, req->req.length);

	if ((rx_len < ep->ep.maxpacket) ||
	    (req->req.actual == req->req.length)) {
		ast_udc_ep0_tx(udc);
		if (!ep->dir_in)
			ast_udc_done(ep, req, 0);

	} else {
		if (rx_len > req->req.length) {
			/* Received more than expected: warn and complete */
			dev_warn(dev, "Something wrong (%d/%d)\n",
				 req->req.actual, req->req.length);
			ast_udc_ep0_tx(udc);
			ast_udc_done(ep, req, 0);
			return;
		}

		ep->dir_in = 0;

		/* More work to do */
		ast_udc_ep0_queue(ep, req);
	}
}

static void ast_udc_ep0_in(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;

	if (list_empty(&ep->queue)) {
		if (udc->is_control_tx) {
			ast_udc_ep0_rx(udc);
			udc->is_control_tx = 0;
		}

		return;
	}

	req = list_entry(ep->queue.next, struct ast_udc_request, queue);

	SETUP_DBG(udc, "req %p (%d/%d)\n", req,
		  req->req.actual, req->req.length);

	if (req->req.length == req->req.actual) {
		if (req->req.length)
			ast_udc_ep0_rx(udc);

		if (ep->dir_in)
			ast_udc_done(ep, req, 0);

	} else {
		/* More work to do */
		ast_udc_ep0_queue(ep, req);
	}
}

static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num)
{
	struct ast_udc_ep *ep = &udc->ep[ep_num];
	struct ast_udc_request *req;
	u16 len = 0;

	if (list_empty(&ep->queue))
		return;

	req = list_first_entry(&ep->queue, struct ast_udc_request, queue);

	len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS));
	req->req.actual += len;

	EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
	       req->req.actual, req->req.length, "len", len);

	/* Done with this request */
	if (req->req.length == req->req.actual) {
		ast_udc_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue,
					       struct ast_udc_request,
					       queue);

	} else {
		/* Check for short packet */
		if (len < ep->ep.maxpacket) {
			ast_udc_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_udc_request,
						       queue);
		}
	}

	/* More requests */
	if (req)
		ast_udc_epn_kick(ep, req);
}

static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num)
{
	struct ast_udc_ep *ep = &udc->ep[ep_num];
	struct device *dev = &udc->pdev->dev;
	struct ast_udc_request *req;
	u32 proc_sts, wr_ptr, rd_ptr;
	u32 len_in_desc, ctrl;
	u16 total_len = 0;
	int i;

	if (list_empty(&ep->queue)) {
		dev_warn(dev, "%s request queue empty !\n", ep->ep.name);
		return;
	}

	req = list_first_entry(&ep->queue, struct ast_udc_request, queue);

	ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL);
	proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl);

	/* Check that the processing status is idle */
	if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE &&
	    proc_sts != EP_DMA_CTRL_STS_TX_IDLE) {
		dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n",
			 ast_ep_read(ep, AST_UDC_EP_DMA_CTRL),
			 proc_sts);
		return;
	}

	ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS);
	rd_ptr = EP_DMA_GET_RPTR(ctrl);
	wr_ptr = EP_DMA_GET_WPTR(ctrl);

	if (rd_ptr != wr_ptr) {
		dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n",
			 "rptr", rd_ptr, "wptr", wr_ptr);
		return;
	}

	EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr);
	i = req->saved_dma_wptr;

	do {
		len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1);
		EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc);
		total_len += len_in_desc;
		i++;
		if (i >= AST_UDC_DESCS_COUNT)
			i = 0;

	} while (i != wr_ptr);

	req->req.actual += total_len;

	EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
	       req->req.actual, req->req.length, "len", total_len);

	/* Done with this request */
	if (req->req.length == req->req.actual) {
		ast_udc_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue,
					       struct ast_udc_request,
					       queue);

	} else {
		/* Check for short packet */
		if (total_len < ep->ep.maxpacket) {
			ast_udc_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_udc_request,
						       queue);
		}
	}

	/* More requests & dma descs not setup yet */
	if (req && (req->actual_dma_length == req->req.actual)) {
		EP_DBG(ep, "More requests\n");
		ast_udc_epn_kick_desc(ep, req);
	}
}

static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len)
{
	if (len) {
		memcpy(udc->ep0_buf, tx_data, len);

		ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
		ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL);
		ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY,
			      AST_UDC_EP0_CTRL);
		udc->is_control_tx = 1;

	} else
		ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}

static void ast_udc_getstatus(struct ast_udc_dev *udc)
{
	struct usb_ctrlrequest crq;
	struct ast_udc_ep *ep;
	u16 status = 0;
	u16 epnum = 0;

	memcpy_fromio(&crq, udc->creq, sizeof(crq));

	switch (crq.bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		/* Get device status */
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		break;
	case USB_RECIP_ENDPOINT:
		epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
		status = udc->ep[epnum].stopped;
		break;
	default:
		goto stall;
	}

	ep = &udc->ep[epnum];
	EP_DBG(ep, "status: 0x%x\n", status);
	ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status));

	return;

stall:
	EP_DBG(ep, "Can't respond to request\n");
	ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
		      AST_UDC_EP0_CTRL);
}

static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;
	struct usb_ctrlrequest crq;
	int req_num = 0;
	int rc = 0;
	u32 reg;

	memcpy_fromio(&crq, udc->creq, sizeof(crq));

	SETUP_DBG(udc, "SETUP packet: %02x/%02x/%04x/%04x/%04x\n",
		  crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue),
		  le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength));

	/*
	 * Clean up any ep0 request(s) still in the queue because
	 * a new control setup packet has arrived.
	 */
	list_for_each_entry(req, &udc->ep[0].queue, queue) {
		req_num++;
		EP_DBG(ep, "there is req %p in ep0 queue !\n", req);
	}

	if (req_num)
		ast_udc_nuke(&udc->ep[0], -ETIMEDOUT);

	udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN;

	if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (crq.bRequest) {
		case USB_REQ_SET_ADDRESS:
			if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED)
				udc->gadget.speed = USB_SPEED_HIGH;
			else
				udc->gadget.speed = USB_SPEED_FULL;

			SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue);
			reg = ast_udc_read(udc, AST_UDC_CONFIG);
			reg &= ~UDC_CFG_ADDR_MASK;
			reg |= UDC_CFG_SET_ADDR(crq.wValue);
			ast_udc_write(udc, reg, AST_UDC_CONFIG);
			goto req_complete;

		case USB_REQ_CLEAR_FEATURE:
			SETUP_DBG(udc, "ep0: CLEAR FEATURE\n");
			goto req_driver;

		case USB_REQ_SET_FEATURE:
			SETUP_DBG(udc, "ep0: SET FEATURE\n");
			goto req_driver;

		case USB_REQ_GET_STATUS:
			ast_udc_getstatus(udc);
			return;

		default:
			goto req_driver;
		}

	}

req_driver:
	if (udc->driver) {
		SETUP_DBG(udc, "Forwarding %s to gadget...\n",
			  udc->gadget.name);

		spin_unlock(&udc->lock);
		rc = udc->driver->setup(&udc->gadget, &crq);
		spin_lock(&udc->lock);

	} else {
		SETUP_DBG(udc, "No gadget for request !\n");
	}

	if (rc >= 0)
		return;

	/* Stall if the gadget failed */
	SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc);
	ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
		      AST_UDC_EP0_CTRL);
	return;

req_complete:
	SETUP_DBG(udc, "ep0: Sending IN status without data\n");
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}

static irqreturn_t ast_udc_isr(int irq, void *data)
{
	struct ast_udc_dev *udc = (struct ast_udc_dev *)data;
	struct ast_udc_ep *ep;
	u32 isr, ep_isr;
	int i;

	spin_lock(&udc->lock);

	isr = ast_udc_read(udc, AST_UDC_ISR);
	if (!isr)
		goto done;

	/* Ack interrupts */
	ast_udc_write(udc, isr, AST_UDC_ISR);

	if (isr & UDC_IRQ_BUS_RESET) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n");
		udc->gadget.speed = USB_SPEED_UNKNOWN;

		ep = &udc->ep[1];
		EP_DBG(ep, "dctrl:0x%x\n",
		       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));

		if (udc->driver && udc->driver->reset) {
			spin_unlock(&udc->lock);
			udc->driver->reset(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_SUSPEND) {
		ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n");
		udc->suspended_from = udc->gadget.state;
		usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED);

		if (udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_RESUME) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n");
		usb_gadget_set_state(&udc->gadget, udc->suspended_from);

		if (udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
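			/* Re-take the lock dropped around the gadget callback */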
1167*055276c1SNeal Liu spin_lock(&udc->lock); 1168*055276c1SNeal Liu } 1169*055276c1SNeal Liu } 1170*055276c1SNeal Liu 1171*055276c1SNeal Liu if (isr & UDC_IRQ_EP0_IN_ACK_STALL) { 1172*055276c1SNeal Liu ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n"); 1173*055276c1SNeal Liu ast_udc_ep0_in(udc); 1174*055276c1SNeal Liu } 1175*055276c1SNeal Liu 1176*055276c1SNeal Liu if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) { 1177*055276c1SNeal Liu ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n"); 1178*055276c1SNeal Liu ast_udc_ep0_out(udc); 1179*055276c1SNeal Liu } 1180*055276c1SNeal Liu 1181*055276c1SNeal Liu if (isr & UDC_IRQ_EP0_SETUP) { 1182*055276c1SNeal Liu ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n"); 1183*055276c1SNeal Liu ast_udc_ep0_handle_setup(udc); 1184*055276c1SNeal Liu } 1185*055276c1SNeal Liu 1186*055276c1SNeal Liu if (isr & UDC_IRQ_EP_POOL_ACK_STALL) { 1187*055276c1SNeal Liu ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n"); 1188*055276c1SNeal Liu ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR); 1189*055276c1SNeal Liu 1190*055276c1SNeal Liu /* Ack EP interrupts */ 1191*055276c1SNeal Liu ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR); 1192*055276c1SNeal Liu 1193*055276c1SNeal Liu /* Handle each EP */ 1194*055276c1SNeal Liu for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) { 1195*055276c1SNeal Liu if (ep_isr & (0x1 << i)) { 1196*055276c1SNeal Liu ep = &udc->ep[i + 1]; 1197*055276c1SNeal Liu if (ep->desc_mode) 1198*055276c1SNeal Liu ast_udc_epn_handle_desc(udc, i + 1); 1199*055276c1SNeal Liu else 1200*055276c1SNeal Liu ast_udc_epn_handle(udc, i + 1); 1201*055276c1SNeal Liu } 1202*055276c1SNeal Liu } 1203*055276c1SNeal Liu } 1204*055276c1SNeal Liu 1205*055276c1SNeal Liu done: 1206*055276c1SNeal Liu spin_unlock(&udc->lock); 1207*055276c1SNeal Liu return IRQ_HANDLED; 1208*055276c1SNeal Liu } 1209*055276c1SNeal Liu 1210*055276c1SNeal Liu static int ast_udc_gadget_getframe(struct usb_gadget *gadget) 1211*055276c1SNeal Liu { 1212*055276c1SNeal Liu struct ast_udc_dev *udc = to_ast_dev(gadget); 1213*055276c1SNeal Liu 1214*055276c1SNeal Liu return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff; 1215*055276c1SNeal Liu } 1216*055276c1SNeal Liu 1217*055276c1SNeal Liu static void ast_udc_wake_work(struct work_struct *work) 1218*055276c1SNeal Liu { 1219*055276c1SNeal Liu struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev, 1220*055276c1SNeal Liu wake_work); 1221*055276c1SNeal Liu unsigned long flags; 1222*055276c1SNeal Liu u32 ctrl; 1223*055276c1SNeal Liu 1224*055276c1SNeal Liu spin_lock_irqsave(&udc->lock, flags); 1225*055276c1SNeal Liu 1226*055276c1SNeal Liu UDC_DBG(udc, "Wakeup Host !\n"); 1227*055276c1SNeal Liu ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL); 1228*055276c1SNeal Liu ast_udc_write(udc, ctrl | USB_REMOTE_WAKEUP_EN, AST_UDC_FUNC_CTRL); 1229*055276c1SNeal Liu 1230*055276c1SNeal Liu spin_unlock_irqrestore(&udc->lock, flags); 1231*055276c1SNeal Liu } 1232*055276c1SNeal Liu 1233*055276c1SNeal Liu static void ast_udc_wakeup_all(struct ast_udc_dev *udc) 1234*055276c1SNeal Liu { 1235*055276c1SNeal Liu /* 1236*055276c1SNeal Liu * A device is trying to wake the world, because this 1237*055276c1SNeal Liu * can recurse into the device, we break the call chain 1238*055276c1SNeal Liu * using a work queue 1239*055276c1SNeal Liu */ 1240*055276c1SNeal Liu schedule_work(&udc->wake_work); 1241*055276c1SNeal Liu } 1242*055276c1SNeal Liu 1243*055276c1SNeal Liu static int ast_udc_wakeup(struct usb_gadget *gadget) 1244*055276c1SNeal Liu { 1245*055276c1SNeal Liu struct ast_udc_dev *udc = to_ast_dev(gadget); 1246*055276c1SNeal Liu 
static int ast_udc_wakeup(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);

	if (!udc->wakeup_en) {
		UDC_DBG(udc, "Remote Wakeup is disabled\n");
		rc = -EINVAL;
		goto err;
	}

	UDC_DBG(udc, "Device initiated wakeup\n");
	ast_udc_wakeup_all(udc);

err:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/*
 * Activate/Deactivate link with host
 */
static int ast_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "is_on: %d\n", is_on);
	if (is_on)
		ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) | USB_UPSTREAM_EN;
	else
		ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;

	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int ast_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	struct ast_udc_ep *ep;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "\n");
	udc->driver = driver;
	udc->gadget.dev.of_node = udc->pdev->dev.of_node;

	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->stopped = 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int ast_udc_stop(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "\n");
	ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->driver = NULL;

	ast_udc_stop_activity(udc);
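	/*
	 * The upstream connection is disabled and endpoint activity has
	 * been stopped; report the device as no longer attached.
	 */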
	usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops ast_udc_ops = {
	.get_frame		= ast_udc_gadget_getframe,
	.wakeup			= ast_udc_wakeup,
	.pullup			= ast_udc_pullup,
	.udc_start		= ast_udc_start,
	.udc_stop		= ast_udc_stop,
};

/*
 * Support one control endpoint.
 * Support multiple programmable endpoints that can be configured as
 * Bulk IN/OUT, Interrupt IN/OUT, or Isochronous IN/OUT endpoints.
 */
static void ast_udc_init_ep(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep;
	int i;

	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->ep.name = ast_ep_name[i];
		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;

		ep->ep.ops = &ast_udc_ep_ops;
		ep->udc = udc;

		INIT_LIST_HEAD(&ep->queue);

		if (i == 0) {
			usb_ep_set_maxpacket_limit(&ep->ep,
						   AST_UDC_EP0_MAX_PACKET);
			continue;
		}

		ep->ep_reg = udc->reg + AST_UDC_EP_BASE +
				(AST_UDC_EP_OFFSET * (i - 1));

		ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE);
		ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE);
		usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET);

		ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET;
		ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET;
		ep->descs_wptr = 0;

		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
	}
}

static void ast_udc_init_dev(struct ast_udc_dev *udc)
{
	INIT_WORK(&udc->wake_work, ast_udc_wake_work);
}

static void ast_udc_init_hw(struct ast_udc_dev *udc)
{
	u32 ctrl;

	/* Enable PHY */
	ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udelay(1);
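	/* Take the device controller out of soft reset */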
	ast_udc_write(udc, 0, AST_UDC_DEV_RESET);

	/* Set descriptor ring size */
	if (AST_UDC_DESCS_COUNT == 256) {
		ctrl |= USB_EP_LONG_DESC;
		ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
	}

	/* Mask & ack all interrupts before installing the handler */
	ast_udc_write(udc, 0, AST_UDC_IER);
	ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR);

	/* Enable some interrupts */
	ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME |
	       UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET |
	       UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL |
	       UDC_IRQ_EP0_SETUP;
	ast_udc_write(udc, ctrl, AST_UDC_IER);

	/* Clean up and enable ep ACK interrupts */
	ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER);
	ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR);

	ast_udc_write(udc, 0, AST_UDC_EP0_CTRL);
}

static int ast_udc_remove(struct platform_device *pdev)
{
	struct ast_udc_dev *udc = platform_get_drvdata(pdev);
	unsigned long flags;
	u32 ctrl;

	usb_del_gadget_udc(&udc->gadget);
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* Disable upstream port connection */
	ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* clk_unprepare() may sleep, so do it outside of the spinlock */
	clk_disable_unprepare(udc->clk);

	if (udc->ep0_buf)
		dma_free_coherent(&pdev->dev,
				  AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS,
				  udc->ep0_buf,
				  udc->ep0_buf_dma);

	udc->ep0_buf = NULL;

	return 0;
}

static int ast_udc_probe(struct platform_device *pdev)
{
	enum usb_device_speed max_speed;
	struct device *dev = &pdev->dev;
	struct ast_udc_dev *udc;
	struct resource *res;
	int rc;

	udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	udc->gadget.dev.parent = dev;
	udc->pdev = pdev;
	spin_lock_init(&udc->lock);

	udc->gadget.ops = &ast_udc_ops;
	udc->gadget.ep0 = &udc->ep[0].ep;
	udc->gadget.name = "aspeed-udc";
	udc->gadget.dev.init_name = "gadget";

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(udc->reg)) {
		dev_err(&pdev->dev, "Failed to map resources\n");
		return PTR_ERR(udc->reg);
	}

	platform_set_drvdata(pdev, udc);

	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk)) {
		rc = PTR_ERR(udc->clk);
		goto err;
	}
	rc = clk_prepare_enable(udc->clk);
	if (rc) {
		dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc);
		goto err;
	}

	/* Check if we need to limit the HW to USB1 */
	max_speed = usb_get_maximum_speed(&pdev->dev);
	if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
		udc->force_usb1 = true;

	/*
	 * Allocate DMA buffers for all EPs in one chunk
	 */
	udc->ep0_buf = dma_alloc_coherent(&pdev->dev,
					  AST_UDC_EP_DMA_SIZE *
					  AST_UDC_NUM_ENDPOINTS,
					  &udc->ep0_buf_dma, GFP_KERNEL);
	if (!udc->ep0_buf) {
		rc = -ENOMEM;
		goto err;
	}

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->creq = udc->reg + AST_UDC_SETUP0;

	/*
	 * Support single-stage mode or 32/256-stage descriptor mode.
	 * Default to descriptor mode.
	 */
	udc->desc_mode = AST_UDC_DESC_MODE;

	dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ?
		 "descriptor mode" : "single mode");

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);

	/* Initialize udc endpoints */
	ast_udc_init_ep(udc);

	/* Initialize udc device */
	ast_udc_init_dev(udc);

	/* Initialize udc hardware */
	ast_udc_init_hw(udc);

	/* Find interrupt and install handler */
	udc->irq = platform_get_irq(pdev, 0);
	if (udc->irq < 0) {
		dev_err(&pdev->dev, "Failed to get interrupt\n");
		rc = udc->irq;
		goto err;
	}

	rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0,
			      KBUILD_MODNAME, udc);
	if (rc) {
		dev_err(&pdev->dev, "Failed to request interrupt\n");
		goto err;
	}

	rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (rc) {
		dev_err(&pdev->dev, "Failed to add gadget udc\n");
		goto err;
	}

	dev_info(&pdev->dev, "Initialized udc in USB%s mode\n",
		 udc->force_usb1 ? "1" : "2");

	return 0;

err:
	dev_err(&pdev->dev, "Failed to probe udc, rc:0x%x\n", rc);
	ast_udc_remove(pdev);

	return rc;
}

static const struct of_device_id ast_udc_of_dt_ids[] = {
	{ .compatible = "aspeed,ast2600-udc", },
	{}
};

MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids);

static struct platform_driver ast_udc_driver = {
	.probe			= ast_udc_probe,
	.remove			= ast_udc_remove,
	.driver			= {
		.name			= KBUILD_MODNAME,
		.of_match_table		= ast_udc_of_dt_ids,
	},
};

module_platform_driver(ast_udc_driver);

MODULE_DESCRIPTION("ASPEED UDC driver");
MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_LICENSE("GPL");
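
/*
 * Example usage (illustrative sketch only, not part of the driver): a
 * minimal devicetree node that would bind against "aspeed,ast2600-udc".
 * The unit address, register size, interrupt specifier and clock
 * reference below are placeholders; take the real values from the SoC
 * dtsi and the binding document for the target board.
 *
 *	udc: usb@1e6a2000 {
 *		compatible = "aspeed,ast2600-udc";
 *		reg = <0x1e6a2000 0x300>;
 *		interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&syscon ASPEED_CLK_GATE_USBPORT2CLK>;
 *		status = "okay";
 *	};
 */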