1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (c) 2021 Aspeed Technology Inc. 4 */ 5 6 #include <linux/clk.h> 7 #include <linux/delay.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/interrupt.h> 10 #include <linux/kernel.h> 11 #include <linux/module.h> 12 #include <linux/of.h> 13 #include <linux/platform_device.h> 14 #include <linux/prefetch.h> 15 #include <linux/usb/ch9.h> 16 #include <linux/usb/gadget.h> 17 #include <linux/slab.h> 18 19 #define AST_UDC_NUM_ENDPOINTS (1 + 4) 20 #define AST_UDC_EP0_MAX_PACKET 64 /* EP0's max packet size */ 21 #define AST_UDC_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */ 22 #define AST_UDC_DESCS_COUNT 256 /* Use 256 stages descriptor mode (32/256) */ 23 #define AST_UDC_DESC_MODE 1 /* Single/Multiple Stage(s) Descriptor Mode */ 24 25 #define AST_UDC_EP_DMA_SIZE (AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT) 26 27 /***************************** 28 * * 29 * UDC register definitions * 30 * * 31 *****************************/ 32 33 #define AST_UDC_FUNC_CTRL 0x00 /* Root Function Control & Status Register */ 34 #define AST_UDC_CONFIG 0x04 /* Root Configuration Setting Register */ 35 #define AST_UDC_IER 0x08 /* Interrupt Control Register */ 36 #define AST_UDC_ISR 0x0C /* Interrupt Status Register */ 37 #define AST_UDC_EP_ACK_IER 0x10 /* Programmable ep Pool ACK Interrupt Enable Reg */ 38 #define AST_UDC_EP_NAK_IER 0x14 /* Programmable ep Pool NAK Interrupt Enable Reg */ 39 #define AST_UDC_EP_ACK_ISR 0x18 /* Programmable ep Pool ACK Interrupt Status Reg */ 40 #define AST_UDC_EP_NAK_ISR 0x1C /* Programmable ep Pool NAK Interrupt Status Reg */ 41 #define AST_UDC_DEV_RESET 0x20 /* Device Controller Soft Reset Enable Register */ 42 #define AST_UDC_STS 0x24 /* USB Status Register */ 43 #define AST_VHUB_EP_DATA 0x28 /* Programmable ep Pool Data Toggle Value Set */ 44 #define AST_VHUB_ISO_TX_FAIL 0x2C /* Isochronous Transaction Fail Accumulator */ 45 #define AST_UDC_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register 
*/ 46 #define AST_UDC_EP0_DATA_BUFF 0x34 /* Base Address of ep0 IN/OUT Data Buffer Reg */ 47 #define AST_UDC_SETUP0 0x80 /* Root Device Setup Data Buffer0 */ 48 #define AST_UDC_SETUP1 0x84 /* Root Device Setup Data Buffer1 */ 49 50 51 /* Main control reg */ 52 #define USB_PHY_CLK_EN BIT(31) 53 #define USB_FIFO_DYN_PWRD_EN BIT(19) 54 #define USB_EP_LONG_DESC BIT(18) 55 #define USB_BIST_TEST_PASS BIT(13) 56 #define USB_BIST_TURN_ON BIT(12) 57 #define USB_PHY_RESET_DIS BIT(11) 58 #define USB_TEST_MODE(x) ((x) << 8) 59 #define USB_FORCE_TIMER_HS BIT(7) 60 #define USB_FORCE_HS BIT(6) 61 #define USB_REMOTE_WAKEUP_12MS BIT(5) 62 #define USB_REMOTE_WAKEUP_EN BIT(4) 63 #define USB_AUTO_REMOTE_WAKEUP_EN BIT(3) 64 #define USB_STOP_CLK_IN_SUPEND BIT(2) 65 #define USB_UPSTREAM_FS BIT(1) 66 #define USB_UPSTREAM_EN BIT(0) 67 68 /* Main config reg */ 69 #define UDC_CFG_SET_ADDR(x) ((x) & 0x3f) 70 #define UDC_CFG_ADDR_MASK (0x3f) 71 72 /* Interrupt ctrl & status reg */ 73 #define UDC_IRQ_EP_POOL_NAK BIT(17) 74 #define UDC_IRQ_EP_POOL_ACK_STALL BIT(16) 75 #define UDC_IRQ_BUS_RESUME BIT(8) 76 #define UDC_IRQ_BUS_SUSPEND BIT(7) 77 #define UDC_IRQ_BUS_RESET BIT(6) 78 #define UDC_IRQ_EP0_IN_DATA_NAK BIT(4) 79 #define UDC_IRQ_EP0_IN_ACK_STALL BIT(3) 80 #define UDC_IRQ_EP0_OUT_NAK BIT(2) 81 #define UDC_IRQ_EP0_OUT_ACK_STALL BIT(1) 82 #define UDC_IRQ_EP0_SETUP BIT(0) 83 #define UDC_IRQ_ACK_ALL (0x1ff) 84 85 /* EP isr reg */ 86 #define USB_EP3_ISR BIT(3) 87 #define USB_EP2_ISR BIT(2) 88 #define USB_EP1_ISR BIT(1) 89 #define USB_EP0_ISR BIT(0) 90 #define UDC_IRQ_EP_ACK_ALL (0xf) 91 92 /*Soft reset reg */ 93 #define ROOT_UDC_SOFT_RESET BIT(0) 94 95 /* USB status reg */ 96 #define UDC_STS_HIGHSPEED BIT(27) 97 98 /* Programmable EP data toggle */ 99 #define EP_TOGGLE_SET_EPNUM(x) ((x) & 0x3) 100 101 /* EP0 ctrl reg */ 102 #define EP0_GET_RX_LEN(x) ((x >> 16) & 0x7f) 103 #define EP0_TX_LEN(x) ((x & 0x7f) << 8) 104 #define EP0_RX_BUFF_RDY BIT(2) 105 #define EP0_TX_BUFF_RDY BIT(1) 106 #define 
EP0_STALL BIT(0) 107 108 /************************************* 109 * * 110 * per-endpoint register definitions * 111 * * 112 *************************************/ 113 114 #define AST_UDC_EP_CONFIG 0x00 /* Endpoint Configuration Register */ 115 #define AST_UDC_EP_DMA_CTRL 0x04 /* DMA Descriptor List Control/Status Register */ 116 #define AST_UDC_EP_DMA_BUFF 0x08 /* DMA Descriptor/Buffer Base Address */ 117 #define AST_UDC_EP_DMA_STS 0x0C /* DMA Descriptor List R/W Pointer and Status */ 118 119 #define AST_UDC_EP_BASE 0x200 120 #define AST_UDC_EP_OFFSET 0x10 121 122 /* EP config reg */ 123 #define EP_SET_MAX_PKT(x) ((x & 0x3ff) << 16) 124 #define EP_DATA_FETCH_CTRL(x) ((x & 0x3) << 14) 125 #define EP_AUTO_DATA_DISABLE (0x1 << 13) 126 #define EP_SET_EP_STALL (0x1 << 12) 127 #define EP_SET_EP_NUM(x) ((x & 0xf) << 8) 128 #define EP_SET_TYPE_MASK(x) ((x) << 5) 129 #define EP_TYPE_BULK (0x1) 130 #define EP_TYPE_INT (0x2) 131 #define EP_TYPE_ISO (0x3) 132 #define EP_DIR_OUT (0x1 << 4) 133 #define EP_ALLOCATED_MASK (0x7 << 1) 134 #define EP_ENABLE BIT(0) 135 136 /* EP DMA ctrl reg */ 137 #define EP_DMA_CTRL_GET_PROC_STS(x) ((x >> 4) & 0xf) 138 #define EP_DMA_CTRL_STS_RX_IDLE 0x0 139 #define EP_DMA_CTRL_STS_TX_IDLE 0x8 140 #define EP_DMA_CTRL_IN_LONG_MODE (0x1 << 3) 141 #define EP_DMA_CTRL_RESET (0x1 << 2) 142 #define EP_DMA_SINGLE_STAGE (0x1 << 1) 143 #define EP_DMA_DESC_MODE (0x1 << 0) 144 145 /* EP DMA status reg */ 146 #define EP_DMA_SET_TX_SIZE(x) ((x & 0x7ff) << 16) 147 #define EP_DMA_GET_TX_SIZE(x) (((x) >> 16) & 0x7ff) 148 #define EP_DMA_GET_RPTR(x) (((x) >> 8) & 0xff) 149 #define EP_DMA_GET_WPTR(x) ((x) & 0xff) 150 #define EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */ 151 152 /* EP desc reg */ 153 #define AST_EP_DMA_DESC_INTR_ENABLE BIT(31) 154 #define AST_EP_DMA_DESC_PID_DATA0 (0 << 14) 155 #define AST_EP_DMA_DESC_PID_DATA2 BIT(14) 156 #define AST_EP_DMA_DESC_PID_DATA1 (2 << 14) 157 #define AST_EP_DMA_DESC_PID_MDATA (3 << 14) 158 #define 
EP_DESC1_IN_LEN(x) ((x) & 0x1fff) 159 #define AST_EP_DMA_DESC_MAX_LEN (7680) /* Max packet length for trasmit in 1 desc */ 160 161 struct ast_udc_request { 162 struct usb_request req; 163 struct list_head queue; 164 unsigned mapped:1; 165 unsigned int actual_dma_length; 166 u32 saved_dma_wptr; 167 }; 168 169 #define to_ast_req(__req) container_of(__req, struct ast_udc_request, req) 170 171 struct ast_dma_desc { 172 u32 des_0; 173 u32 des_1; 174 }; 175 176 struct ast_udc_ep { 177 struct usb_ep ep; 178 179 /* Request queue */ 180 struct list_head queue; 181 182 struct ast_udc_dev *udc; 183 void __iomem *ep_reg; 184 void *epn_buf; 185 dma_addr_t epn_buf_dma; 186 const struct usb_endpoint_descriptor *desc; 187 188 /* DMA Descriptors */ 189 struct ast_dma_desc *descs; 190 dma_addr_t descs_dma; 191 u32 descs_wptr; 192 u32 chunk_max; 193 194 bool dir_in:1; 195 unsigned stopped:1; 196 bool desc_mode:1; 197 }; 198 199 #define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep) 200 201 struct ast_udc_dev { 202 struct platform_device *pdev; 203 void __iomem *reg; 204 int irq; 205 spinlock_t lock; 206 struct clk *clk; 207 struct work_struct wake_work; 208 209 /* EP0 DMA buffers allocated in one chunk */ 210 void *ep0_buf; 211 dma_addr_t ep0_buf_dma; 212 struct ast_udc_ep ep[AST_UDC_NUM_ENDPOINTS]; 213 214 struct usb_gadget gadget; 215 struct usb_gadget_driver *driver; 216 void __iomem *creq; 217 enum usb_device_state suspended_from; 218 int desc_mode; 219 220 /* Force full speed only */ 221 bool force_usb1:1; 222 unsigned is_control_tx:1; 223 bool wakeup_en:1; 224 }; 225 226 #define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget) 227 228 static const char * const ast_ep_name[] = { 229 "ep0", "ep1", "ep2", "ep3", "ep4" 230 }; 231 232 #ifdef AST_UDC_DEBUG_ALL 233 #define AST_UDC_DEBUG 234 #define AST_SETUP_DEBUG 235 #define AST_EP_DEBUG 236 #define AST_ISR_DEBUG 237 #endif 238 239 #ifdef AST_SETUP_DEBUG 240 #define SETUP_DBG(u, fmt, ...) 
\ 241 dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 242 #else 243 #define SETUP_DBG(u, fmt, ...) 244 #endif 245 246 #ifdef AST_EP_DEBUG 247 #define EP_DBG(e, fmt, ...) \ 248 dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__, \ 249 (e)->ep.name, ##__VA_ARGS__) 250 #else 251 #define EP_DBG(ep, fmt, ...) ((void)(ep)) 252 #endif 253 254 #ifdef AST_UDC_DEBUG 255 #define UDC_DBG(u, fmt, ...) \ 256 dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 257 #else 258 #define UDC_DBG(u, fmt, ...) 259 #endif 260 261 #ifdef AST_ISR_DEBUG 262 #define ISR_DBG(u, fmt, ...) \ 263 dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__) 264 #else 265 #define ISR_DBG(u, fmt, ...) 266 #endif 267 268 /*-------------------------------------------------------------------------*/ 269 #define ast_udc_read(udc, offset) \ 270 readl((udc)->reg + (offset)) 271 #define ast_udc_write(udc, val, offset) \ 272 writel((val), (udc)->reg + (offset)) 273 274 #define ast_ep_read(ep, reg) \ 275 readl((ep)->ep_reg + (reg)) 276 #define ast_ep_write(ep, val, reg) \ 277 writel((val), (ep)->ep_reg + (reg)) 278 279 /*-------------------------------------------------------------------------*/ 280 281 static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req, 282 int status) 283 { 284 struct ast_udc_dev *udc = ep->udc; 285 286 EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n", 287 req, req->req.actual, req->req.length, 288 (u32)req->req.buf, ep->dir_in); 289 290 list_del(&req->queue); 291 292 if (req->req.status == -EINPROGRESS) 293 req->req.status = status; 294 else 295 status = req->req.status; 296 297 if (status && status != -ESHUTDOWN) 298 EP_DBG(ep, "done req:%p, status:%d\n", req, status); 299 300 spin_unlock(&udc->lock); 301 usb_gadget_giveback_request(&ep->ep, &req->req); 302 spin_lock(&udc->lock); 303 } 304 305 static void ast_udc_nuke(struct ast_udc_ep *ep, int status) 306 { 307 int count = 0; 308 309 while (!list_empty(&ep->queue)) { 310 
struct ast_udc_request *req; 311 312 req = list_entry(ep->queue.next, struct ast_udc_request, 313 queue); 314 ast_udc_done(ep, req, status); 315 count++; 316 } 317 318 if (count) 319 EP_DBG(ep, "Nuked %d request(s)\n", count); 320 } 321 322 /* 323 * Stop activity on all endpoints. 324 * Device controller for which EP activity is to be stopped. 325 * 326 * All the endpoints are stopped and any pending transfer requests if any on 327 * the endpoint are terminated. 328 */ 329 static void ast_udc_stop_activity(struct ast_udc_dev *udc) 330 { 331 struct ast_udc_ep *ep; 332 int i; 333 334 for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) { 335 ep = &udc->ep[i]; 336 ep->stopped = 1; 337 ast_udc_nuke(ep, -ESHUTDOWN); 338 } 339 } 340 341 static int ast_udc_ep_enable(struct usb_ep *_ep, 342 const struct usb_endpoint_descriptor *desc) 343 { 344 u16 maxpacket = usb_endpoint_maxp(desc); 345 struct ast_udc_ep *ep = to_ast_ep(_ep); 346 struct ast_udc_dev *udc = ep->udc; 347 u8 epnum = usb_endpoint_num(desc); 348 unsigned long flags; 349 u32 ep_conf = 0; 350 u8 dir_in; 351 u8 type; 352 353 if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || 354 maxpacket == 0 || maxpacket > ep->ep.maxpacket) { 355 EP_DBG(ep, "Failed, invalid EP enable param\n"); 356 return -EINVAL; 357 } 358 359 if (!udc->driver) { 360 EP_DBG(ep, "bogus device state\n"); 361 return -ESHUTDOWN; 362 } 363 364 EP_DBG(ep, "maxpacket:0x%x\n", maxpacket); 365 366 spin_lock_irqsave(&udc->lock, flags); 367 368 ep->desc = desc; 369 ep->stopped = 0; 370 ep->ep.maxpacket = maxpacket; 371 ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN; 372 373 if (maxpacket < AST_UDC_EPn_MAX_PACKET) 374 ep_conf = EP_SET_MAX_PKT(maxpacket); 375 376 ep_conf |= EP_SET_EP_NUM(epnum); 377 378 type = usb_endpoint_type(desc); 379 dir_in = usb_endpoint_dir_in(desc); 380 ep->dir_in = dir_in; 381 if (!ep->dir_in) 382 ep_conf |= EP_DIR_OUT; 383 384 EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in); 385 switch (type) { 386 case 
USB_ENDPOINT_XFER_ISOC: 387 ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO); 388 break; 389 390 case USB_ENDPOINT_XFER_BULK: 391 ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK); 392 break; 393 394 case USB_ENDPOINT_XFER_INT: 395 ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT); 396 break; 397 } 398 399 ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in; 400 if (ep->desc_mode) { 401 ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL); 402 ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS); 403 ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF); 404 405 /* Enable Long Descriptor Mode */ 406 ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE, 407 AST_UDC_EP_DMA_CTRL); 408 409 ep->descs_wptr = 0; 410 411 } else { 412 ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL); 413 ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL); 414 ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS); 415 } 416 417 /* Cleanup data toggle just in case */ 418 ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA); 419 420 /* Enable EP */ 421 ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG); 422 423 EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG)); 424 425 spin_unlock_irqrestore(&udc->lock, flags); 426 427 return 0; 428 } 429 430 static int ast_udc_ep_disable(struct usb_ep *_ep) 431 { 432 struct ast_udc_ep *ep = to_ast_ep(_ep); 433 struct ast_udc_dev *udc = ep->udc; 434 unsigned long flags; 435 436 spin_lock_irqsave(&udc->lock, flags); 437 438 ep->ep.desc = NULL; 439 ep->stopped = 1; 440 441 ast_udc_nuke(ep, -ESHUTDOWN); 442 ast_ep_write(ep, 0, AST_UDC_EP_CONFIG); 443 444 spin_unlock_irqrestore(&udc->lock, flags); 445 446 return 0; 447 } 448 449 static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep, 450 gfp_t gfp_flags) 451 { 452 struct ast_udc_ep *ep = to_ast_ep(_ep); 453 struct ast_udc_request *req; 454 455 req = kzalloc(sizeof(struct ast_udc_request), gfp_flags); 456 if (!req) { 457 EP_DBG(ep, "request allocation failed\n"); 458 return 
NULL; 459 } 460 461 INIT_LIST_HEAD(&req->queue); 462 463 return &req->req; 464 } 465 466 static void ast_udc_ep_free_request(struct usb_ep *_ep, 467 struct usb_request *_req) 468 { 469 struct ast_udc_request *req = to_ast_req(_req); 470 471 kfree(req); 472 } 473 474 static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf, 475 u16 tx_len, struct ast_udc_request *req) 476 { 477 struct ast_udc_dev *udc = ep->udc; 478 struct device *dev = &udc->pdev->dev; 479 int chunk, count; 480 u32 offset; 481 482 if (!ep->descs) { 483 dev_warn(dev, "%s: Empty DMA descs list failure\n", 484 ep->ep.name); 485 return -EINVAL; 486 } 487 488 chunk = tx_len; 489 offset = count = 0; 490 491 EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req, 492 "wptr", ep->descs_wptr, "dma_buf", dma_buf, 493 "tx_len", tx_len); 494 495 /* Create Descriptor Lists */ 496 while (chunk > 0 && count < AST_UDC_DESCS_COUNT) { 497 498 ep->descs[ep->descs_wptr].des_0 = dma_buf + offset; 499 500 if (chunk > ep->chunk_max) 501 ep->descs[ep->descs_wptr].des_1 = ep->chunk_max; 502 else 503 ep->descs[ep->descs_wptr].des_1 = chunk; 504 505 chunk -= ep->chunk_max; 506 507 EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n", 508 ep->descs_wptr, 509 ep->descs[ep->descs_wptr].des_0, 510 ep->descs[ep->descs_wptr].des_1); 511 512 if (count == 0) 513 req->saved_dma_wptr = ep->descs_wptr; 514 515 ep->descs_wptr++; 516 count++; 517 518 if (ep->descs_wptr >= AST_UDC_DESCS_COUNT) 519 ep->descs_wptr = 0; 520 521 offset = ep->chunk_max * count; 522 } 523 524 return 0; 525 } 526 527 static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req) 528 { 529 u32 tx_len; 530 u32 last; 531 532 last = req->req.length - req->req.actual; 533 tx_len = last > ep->ep.maxpacket ? 
ep->ep.maxpacket : last; 534 535 EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n", 536 req, tx_len, ep->dir_in); 537 538 ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF); 539 540 /* Start DMA */ 541 ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len), AST_UDC_EP_DMA_STS); 542 ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len) | EP_DMA_SINGLE_KICK, 543 AST_UDC_EP_DMA_STS); 544 } 545 546 static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep, 547 struct ast_udc_request *req) 548 { 549 u32 descs_max_size; 550 u32 tx_len; 551 u32 last; 552 553 descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT; 554 555 last = req->req.length - req->req.actual; 556 tx_len = last > descs_max_size ? descs_max_size : last; 557 558 EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n", 559 req, "tx_len", tx_len, "dir_in", ep->dir_in, 560 "dma", req->req.dma + req->req.actual, 561 req->req.actual, req->req.length, 562 "descs_max_size", descs_max_size); 563 564 if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual, 565 tx_len, req)) 566 req->actual_dma_length += tx_len; 567 568 /* make sure CPU done everything before triggering DMA */ 569 mb(); 570 571 ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS); 572 573 EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n", 574 ep->descs_wptr, 575 ast_ep_read(ep, AST_UDC_EP_DMA_STS), 576 ast_ep_read(ep, AST_UDC_EP_DMA_CTRL)); 577 } 578 579 static void ast_udc_ep0_queue(struct ast_udc_ep *ep, 580 struct ast_udc_request *req) 581 { 582 struct ast_udc_dev *udc = ep->udc; 583 u32 tx_len; 584 u32 last; 585 586 last = req->req.length - req->req.actual; 587 tx_len = last > ep->ep.maxpacket ? 
ep->ep.maxpacket : last; 588 589 ast_udc_write(udc, req->req.dma + req->req.actual, 590 AST_UDC_EP0_DATA_BUFF); 591 592 if (ep->dir_in) { 593 /* IN requests, send data */ 594 SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n", 595 "buf", (u32)req->req.buf, 596 "dma", req->req.dma + req->req.actual, 597 "tx_len", tx_len, 598 req->req.actual, req->req.length, 599 "dir_in", ep->dir_in); 600 601 req->req.actual += tx_len; 602 ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL); 603 ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY, 604 AST_UDC_EP0_CTRL); 605 606 } else { 607 /* OUT requests, receive data */ 608 SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n", 609 "buf", (u32)req->req.buf, 610 "dma", req->req.dma + req->req.actual, 611 "len", req->req.actual, req->req.length, 612 "dir_in", ep->dir_in); 613 614 if (!req->req.length) { 615 /* 0 len request, send tx as completion */ 616 ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); 617 ep->dir_in = 0x1; 618 } else 619 ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL); 620 } 621 } 622 623 static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req, 624 gfp_t gfp_flags) 625 { 626 struct ast_udc_request *req = to_ast_req(_req); 627 struct ast_udc_ep *ep = to_ast_ep(_ep); 628 struct ast_udc_dev *udc = ep->udc; 629 struct device *dev = &udc->pdev->dev; 630 unsigned long flags; 631 int rc; 632 633 if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) { 634 dev_warn(dev, "Invalid EP request !\n"); 635 return -EINVAL; 636 } 637 638 if (ep->stopped) { 639 dev_warn(dev, "%s is already stopped !\n", _ep->name); 640 return -ESHUTDOWN; 641 } 642 643 spin_lock_irqsave(&udc->lock, flags); 644 645 list_add_tail(&req->queue, &ep->queue); 646 647 req->req.actual = 0; 648 req->req.status = -EINPROGRESS; 649 req->actual_dma_length = 0; 650 651 rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in); 652 if (rc) { 653 EP_DBG(ep, "Request mapping failure %d\n", rc); 654 
dev_warn(dev, "Request mapping failure %d\n", rc); 655 goto end; 656 } 657 658 EP_DBG(ep, "enqueue req @%p\n", req); 659 EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n", 660 _req->length, _req->dma, _req->zero, ep->dir_in); 661 662 /* EP0 request enqueue */ 663 if (ep->ep.desc == NULL) { 664 if ((req->req.dma % 4) != 0) { 665 dev_warn(dev, "EP0 req dma alignment error\n"); 666 rc = -ESHUTDOWN; 667 goto end; 668 } 669 670 ast_udc_ep0_queue(ep, req); 671 goto end; 672 } 673 674 /* EPn request enqueue */ 675 if (list_is_singular(&ep->queue)) { 676 if (ep->desc_mode) 677 ast_udc_epn_kick_desc(ep, req); 678 else 679 ast_udc_epn_kick(ep, req); 680 } 681 682 end: 683 spin_unlock_irqrestore(&udc->lock, flags); 684 685 return rc; 686 } 687 688 static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 689 { 690 struct ast_udc_ep *ep = to_ast_ep(_ep); 691 struct ast_udc_dev *udc = ep->udc; 692 struct ast_udc_request *req; 693 unsigned long flags; 694 int rc = 0; 695 696 spin_lock_irqsave(&udc->lock, flags); 697 698 /* make sure it's actually queued on this endpoint */ 699 list_for_each_entry(req, &ep->queue, queue) { 700 if (&req->req == _req) { 701 list_del_init(&req->queue); 702 ast_udc_done(ep, req, -ESHUTDOWN); 703 _req->status = -ECONNRESET; 704 break; 705 } 706 } 707 708 /* dequeue request not found */ 709 if (&req->req != _req) 710 rc = -EINVAL; 711 712 spin_unlock_irqrestore(&udc->lock, flags); 713 714 return rc; 715 } 716 717 static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value) 718 { 719 struct ast_udc_ep *ep = to_ast_ep(_ep); 720 struct ast_udc_dev *udc = ep->udc; 721 unsigned long flags; 722 int epnum; 723 u32 ctrl; 724 725 EP_DBG(ep, "val:%d\n", value); 726 727 spin_lock_irqsave(&udc->lock, flags); 728 729 epnum = usb_endpoint_num(ep->desc); 730 731 /* EP0 */ 732 if (epnum == 0) { 733 ctrl = ast_udc_read(udc, AST_UDC_EP0_CTRL); 734 if (value) 735 ctrl |= EP0_STALL; 736 else 737 ctrl &= ~EP0_STALL; 738 739 ast_udc_write(udc, ctrl, 
AST_UDC_EP0_CTRL); 740 741 } else { 742 /* EPn */ 743 ctrl = ast_udc_read(udc, AST_UDC_EP_CONFIG); 744 if (value) 745 ctrl |= EP_SET_EP_STALL; 746 else 747 ctrl &= ~EP_SET_EP_STALL; 748 749 ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG); 750 751 /* only epn is stopped and waits for clear */ 752 ep->stopped = value ? 1 : 0; 753 } 754 755 spin_unlock_irqrestore(&udc->lock, flags); 756 757 return 0; 758 } 759 760 static const struct usb_ep_ops ast_udc_ep_ops = { 761 .enable = ast_udc_ep_enable, 762 .disable = ast_udc_ep_disable, 763 .alloc_request = ast_udc_ep_alloc_request, 764 .free_request = ast_udc_ep_free_request, 765 .queue = ast_udc_ep_queue, 766 .dequeue = ast_udc_ep_dequeue, 767 .set_halt = ast_udc_ep_set_halt, 768 /* there's only imprecise fifo status reporting */ 769 }; 770 771 static void ast_udc_ep0_rx(struct ast_udc_dev *udc) 772 { 773 ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF); 774 ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL); 775 } 776 777 static void ast_udc_ep0_tx(struct ast_udc_dev *udc) 778 { 779 ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF); 780 ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); 781 } 782 783 static void ast_udc_ep0_out(struct ast_udc_dev *udc) 784 { 785 struct device *dev = &udc->pdev->dev; 786 struct ast_udc_ep *ep = &udc->ep[0]; 787 struct ast_udc_request *req; 788 u16 rx_len; 789 790 if (list_empty(&ep->queue)) 791 return; 792 793 req = list_entry(ep->queue.next, struct ast_udc_request, queue); 794 795 rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL)); 796 req->req.actual += rx_len; 797 798 SETUP_DBG(udc, "req %p (%d/%d)\n", req, 799 req->req.actual, req->req.length); 800 801 if ((rx_len < ep->ep.maxpacket) || 802 (req->req.actual == req->req.length)) { 803 ast_udc_ep0_tx(udc); 804 if (!ep->dir_in) 805 ast_udc_done(ep, req, 0); 806 807 } else { 808 if (rx_len > req->req.length) { 809 // Issue Fix 810 dev_warn(dev, "Something wrong (%d/%d)\n", 811 req->req.actual, 
req->req.length); 812 ast_udc_ep0_tx(udc); 813 ast_udc_done(ep, req, 0); 814 return; 815 } 816 817 ep->dir_in = 0; 818 819 /* More works */ 820 ast_udc_ep0_queue(ep, req); 821 } 822 } 823 824 static void ast_udc_ep0_in(struct ast_udc_dev *udc) 825 { 826 struct ast_udc_ep *ep = &udc->ep[0]; 827 struct ast_udc_request *req; 828 829 if (list_empty(&ep->queue)) { 830 if (udc->is_control_tx) { 831 ast_udc_ep0_rx(udc); 832 udc->is_control_tx = 0; 833 } 834 835 return; 836 } 837 838 req = list_entry(ep->queue.next, struct ast_udc_request, queue); 839 840 SETUP_DBG(udc, "req %p (%d/%d)\n", req, 841 req->req.actual, req->req.length); 842 843 if (req->req.length == req->req.actual) { 844 if (req->req.length) 845 ast_udc_ep0_rx(udc); 846 847 if (ep->dir_in) 848 ast_udc_done(ep, req, 0); 849 850 } else { 851 /* More works */ 852 ast_udc_ep0_queue(ep, req); 853 } 854 } 855 856 static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num) 857 { 858 struct ast_udc_ep *ep = &udc->ep[ep_num]; 859 struct ast_udc_request *req; 860 u16 len = 0; 861 862 if (list_empty(&ep->queue)) 863 return; 864 865 req = list_first_entry(&ep->queue, struct ast_udc_request, queue); 866 867 len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS)); 868 req->req.actual += len; 869 870 EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req, 871 req->req.actual, req->req.length, "len", len); 872 873 /* Done this request */ 874 if (req->req.length == req->req.actual) { 875 ast_udc_done(ep, req, 0); 876 req = list_first_entry_or_null(&ep->queue, 877 struct ast_udc_request, 878 queue); 879 880 } else { 881 /* Check for short packet */ 882 if (len < ep->ep.maxpacket) { 883 ast_udc_done(ep, req, 0); 884 req = list_first_entry_or_null(&ep->queue, 885 struct ast_udc_request, 886 queue); 887 } 888 } 889 890 /* More requests */ 891 if (req) 892 ast_udc_epn_kick(ep, req); 893 } 894 895 static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num) 896 { 897 struct ast_udc_ep *ep = 
&udc->ep[ep_num]; 898 struct device *dev = &udc->pdev->dev; 899 struct ast_udc_request *req; 900 u32 proc_sts, wr_ptr, rd_ptr; 901 u32 len_in_desc, ctrl; 902 u16 total_len = 0; 903 int i; 904 905 if (list_empty(&ep->queue)) { 906 dev_warn(dev, "%s reqest queue empty !\n", ep->ep.name); 907 return; 908 } 909 910 req = list_first_entry(&ep->queue, struct ast_udc_request, queue); 911 912 ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL); 913 proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl); 914 915 /* Check processing status is idle */ 916 if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE && 917 proc_sts != EP_DMA_CTRL_STS_TX_IDLE) { 918 dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n", 919 ast_ep_read(ep, AST_UDC_EP_DMA_CTRL), 920 proc_sts); 921 return; 922 } 923 924 ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS); 925 rd_ptr = EP_DMA_GET_RPTR(ctrl); 926 wr_ptr = EP_DMA_GET_WPTR(ctrl); 927 928 if (rd_ptr != wr_ptr) { 929 dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n", 930 "rptr", rd_ptr, "wptr", wr_ptr); 931 return; 932 } 933 934 EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr); 935 i = req->saved_dma_wptr; 936 937 do { 938 len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1); 939 EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc); 940 total_len += len_in_desc; 941 i++; 942 if (i >= AST_UDC_DESCS_COUNT) 943 i = 0; 944 945 } while (i != wr_ptr); 946 947 req->req.actual += total_len; 948 949 EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req, 950 req->req.actual, req->req.length, "len", total_len); 951 952 /* Done this request */ 953 if (req->req.length == req->req.actual) { 954 ast_udc_done(ep, req, 0); 955 req = list_first_entry_or_null(&ep->queue, 956 struct ast_udc_request, 957 queue); 958 959 } else { 960 /* Check for short packet */ 961 if (total_len < ep->ep.maxpacket) { 962 ast_udc_done(ep, req, 0); 963 req = list_first_entry_or_null(&ep->queue, 964 struct ast_udc_request, 965 queue); 966 } 967 } 968 969 /* More requests & dma descs not setup yet */ 970 if (req && 
(req->actual_dma_length == req->req.actual)) { 971 EP_DBG(ep, "More requests\n"); 972 ast_udc_epn_kick_desc(ep, req); 973 } 974 } 975 976 static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len) 977 { 978 if (len) { 979 memcpy(udc->ep0_buf, tx_data, len); 980 981 ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF); 982 ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL); 983 ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY, 984 AST_UDC_EP0_CTRL); 985 udc->is_control_tx = 1; 986 987 } else 988 ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL); 989 } 990 991 static void ast_udc_getstatus(struct ast_udc_dev *udc) 992 { 993 struct usb_ctrlrequest crq; 994 struct ast_udc_ep *ep; 995 u16 status = 0; 996 u16 epnum = 0; 997 998 memcpy_fromio(&crq, udc->creq, sizeof(crq)); 999 1000 switch (crq.bRequestType & USB_RECIP_MASK) { 1001 case USB_RECIP_DEVICE: 1002 /* Get device status */ 1003 status = 1 << USB_DEVICE_SELF_POWERED; 1004 break; 1005 case USB_RECIP_INTERFACE: 1006 break; 1007 case USB_RECIP_ENDPOINT: 1008 epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK; 1009 status = udc->ep[epnum].stopped; 1010 break; 1011 default: 1012 goto stall; 1013 } 1014 1015 ep = &udc->ep[epnum]; 1016 EP_DBG(ep, "status: 0x%x\n", status); 1017 ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status)); 1018 1019 return; 1020 1021 stall: 1022 EP_DBG(ep, "Can't respond request\n"); 1023 ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL, 1024 AST_UDC_EP0_CTRL); 1025 } 1026 1027 static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc) 1028 { 1029 struct ast_udc_ep *ep = &udc->ep[0]; 1030 struct ast_udc_request *req; 1031 struct usb_ctrlrequest crq; 1032 int req_num = 0; 1033 int rc = 0; 1034 u32 reg; 1035 1036 memcpy_fromio(&crq, udc->creq, sizeof(crq)); 1037 1038 SETUP_DBG(udc, "SETEUP packet: %02x/%02x/%04x/%04x/%04x\n", 1039 crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue), 1040 le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength)); 
	/*
	 * Cleanup ep0 request(s) in queue because
	 * there is a new control setup comes.
	 */
	list_for_each_entry(req, &udc->ep[0].queue, queue) {
		req_num++;
		EP_DBG(ep, "there is req %p in ep0 queue !\n", req);
	}

	if (req_num)
		ast_udc_nuke(&udc->ep[0], -ETIMEDOUT);

	/* Record the data direction of this control transfer */
	udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN;

	/*
	 * Handle a few standard requests in the driver itself; everything
	 * else falls through to the gadget driver's setup() callback.
	 */
	if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (crq.bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* Latch the negotiated speed from the status reg */
			if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED)
				udc->gadget.speed = USB_SPEED_HIGH;
			else
				udc->gadget.speed = USB_SPEED_FULL;

			SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue);
			reg = ast_udc_read(udc, AST_UDC_CONFIG);
			reg &= ~UDC_CFG_ADDR_MASK;
			reg |= UDC_CFG_SET_ADDR(crq.wValue);
			ast_udc_write(udc, reg, AST_UDC_CONFIG);
			goto req_complete;

		case USB_REQ_CLEAR_FEATURE:
			SETUP_DBG(udc, "ep0: CLEAR FEATURE\n");
			goto req_driver;

		case USB_REQ_SET_FEATURE:
			SETUP_DBG(udc, "ep0: SET FEATURE\n");
			goto req_driver;

		case USB_REQ_GET_STATUS:
			ast_udc_getstatus(udc);
			return;

		default:
			goto req_driver;
		}

	}

req_driver:
	/*
	 * Forward the request to the gadget driver; the lock is dropped
	 * because setup() may re-enter the UDC (e.g. queue a request).
	 */
	if (udc->driver) {
		SETUP_DBG(udc, "Forwarding %s to gadget...\n",
			  udc->gadget.name);

		spin_unlock(&udc->lock);
		rc = udc->driver->setup(&udc->gadget, &crq);
		spin_lock(&udc->lock);

	} else {
		SETUP_DBG(udc, "No gadget for request !\n");
	}

	if (rc >= 0)
		return;

	/* Stall if gadget failed */
	SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc);
	ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
		      AST_UDC_EP0_CTRL);
	return;

req_complete:
	/* Requests handled internally complete with a zero-length IN status */
	SETUP_DBG(udc, "ep0: Sending IN status without data\n");
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}

/*
 * Top-level interrupt handler: acks the global interrupt status, then
 * dispatches bus events (reset/suspend/resume), EP0 events, and the
 * per-endpoint ACK pool. The device lock is dropped around gadget driver
 * callbacks (reset/suspend/resume) since they may call back into the UDC.
 */
static irqreturn_t ast_udc_isr(int irq, void *data)
{
	struct ast_udc_dev *udc = (struct ast_udc_dev *)data;
	struct ast_udc_ep *ep;
	u32 isr, ep_isr;
	int i;

	spin_lock(&udc->lock);

	isr = ast_udc_read(udc, AST_UDC_ISR);
	if (!isr)
		goto done;

	/* Ack interrupts */
	ast_udc_write(udc, isr, AST_UDC_ISR);

	if (isr & UDC_IRQ_BUS_RESET) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n");
		udc->gadget.speed = USB_SPEED_UNKNOWN;

		ep = &udc->ep[1];
		EP_DBG(ep, "dctrl:0x%x\n",
		       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));

		if (udc->driver && udc->driver->reset) {
			spin_unlock(&udc->lock);
			udc->driver->reset(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_SUSPEND) {
		ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n");
		/* Remember the pre-suspend state so resume can restore it */
		udc->suspended_from = udc->gadget.state;
		usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED);

		if (udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_RESUME) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n");
		usb_gadget_set_state(&udc->gadget, udc->suspended_from);

		if (udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_EP0_IN_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n");
		ast_udc_ep0_in(udc);
	}

	if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n");
		ast_udc_ep0_out(udc);
	}

	if (isr & UDC_IRQ_EP0_SETUP) {
		ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n");
		ast_udc_ep0_handle_setup(udc);
	}

	if (isr & UDC_IRQ_EP_POOL_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n");
		ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR);

		/* Ack EP interrupts */
		ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR);

		/* Handle each EP (pool bit i maps to generic endpoint i+1) */
		for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) {
			if (ep_isr & (0x1 << i)) {
				ep = &udc->ep[i + 1];
				if (ep->desc_mode)
					ast_udc_epn_handle_desc(udc, i + 1);
				else
					ast_udc_epn_handle(udc, i + 1);
			}
		}
	}

done:
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}

/*
 * gadget_ops.get_frame: current frame number, taken from bits [26:16]
 * of the USB status register.
 */
static int ast_udc_gadget_getframe(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);

	return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff;
}

/*
 * Deferred remote-wakeup work: sets the remote-wakeup enable bit in the
 * function control register to signal the host.
 */
static void ast_udc_wake_work(struct work_struct *work)
{
	struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev,
					       wake_work);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "Wakeup Host !\n");
	ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL);
	ast_udc_write(udc, ctrl | USB_REMOTE_WAKEUP_EN, AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);
}

static void ast_udc_wakeup_all(struct ast_udc_dev *udc)
{
	/*
	 * A device is trying to wake the world, because this
	 * can recurse into the device, we break the call chain
	 * using a work queue
	 */
	schedule_work(&udc->wake_work);
}

/*
 * gadget_ops.wakeup: initiate a remote wakeup, but only when the host
 * has enabled remote wakeup (udc->wakeup_en); otherwise -EINVAL.
 */
static int ast_udc_wakeup(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);

	if (!udc->wakeup_en) {
		UDC_DBG(udc, "Remote Wakeup is disabled\n");
		rc = -EINVAL;
		goto err;
	}

	UDC_DBG(udc, "Device initiated wakeup\n");
	ast_udc_wakeup_all(udc);

err:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/*
 * Activate/Deactivate link with host
 */
static int ast_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct
ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "is_on: %d\n", is_on);
	/* Toggle the upstream-enable bit to connect/disconnect from the host */
	if (is_on)
		ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) | USB_UPSTREAM_EN;
	else
		ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;

	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/*
 * gadget_ops.udc_start: bind the gadget driver and mark all endpoints
 * as runnable. Hardware is left untouched here; the connection is made
 * via ast_udc_pullup().
 */
static int ast_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	struct ast_udc_ep *ep;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "\n");
	udc->driver = driver;
	udc->gadget.dev.of_node = udc->pdev->dev.of_node;

	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->stopped = 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/*
 * gadget_ops.udc_stop: disconnect from the host, drop the bound driver,
 * and tear down any in-flight endpoint activity.
 */
static int ast_udc_stop(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "\n");
	ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->driver = NULL;

	ast_udc_stop_activity(udc);
	usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops ast_udc_ops = {
	.get_frame		= ast_udc_gadget_getframe,
	.wakeup			= ast_udc_wakeup,
	.pullup			= ast_udc_pullup,
	.udc_start		= ast_udc_start,
	.udc_stop		= ast_udc_stop,
};

/*
 * Support 1 Control Endpoint.
 * Support multiple programmable endpoints that can be configured to
 * Bulk IN/OUT, Interrupt IN/OUT, and Isochronous IN/OUT type endpoint.
 */
static void ast_udc_init_ep(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep;
	int i;

	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->ep.name = ast_ep_name[i];
		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			/* Generic endpoints support iso/bulk/int transfers */
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;

		ep->ep.ops = &ast_udc_ep_ops;
		ep->udc = udc;

		INIT_LIST_HEAD(&ep->queue);

		/* EP0 has its own registers/buffer and a smaller maxpacket */
		if (i == 0) {
			usb_ep_set_maxpacket_limit(&ep->ep,
						   AST_UDC_EP0_MAX_PACKET);
			continue;
		}

		ep->ep_reg = udc->reg + AST_UDC_EP_BASE +
				(AST_UDC_EP_OFFSET * (i - 1));

		/* Carve this EP's slice out of the single DMA allocation */
		ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE);
		ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE);
		usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET);

		/* Descriptor ring lives right after the packet buffer */
		ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET;
		ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET;
		ep->descs_wptr = 0;

		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
	}
}

/* One-time software state initialization (deferred wakeup work item) */
static void ast_udc_init_dev(struct ast_udc_dev *udc)
{
	INIT_WORK(&udc->wake_work, ast_udc_wake_work);
}

/*
 * Bring the controller hardware to a known state: enable the PHY,
 * soft-reset the device controller, select the descriptor ring size,
 * and program the interrupt enables.
 */
static void ast_udc_init_hw(struct ast_udc_dev *udc)
{
	u32 ctrl;

	/* Enable PHY */
	ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udelay(1);
	ast_udc_write(udc, 0, AST_UDC_DEV_RESET);

	/* Set descriptor ring size */
	if (AST_UDC_DESCS_COUNT == 256) {
		ctrl |= USB_EP_LONG_DESC;
		ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
	}

	/* Mask & ack all interrupts before installing the handler */
1415 ast_udc_write(udc, 0, AST_UDC_IER); 1416 ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR); 1417 1418 /* Enable some interrupts */ 1419 ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME | 1420 UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET | 1421 UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL | 1422 UDC_IRQ_EP0_SETUP; 1423 ast_udc_write(udc, ctrl, AST_UDC_IER); 1424 1425 /* Cleanup and enable ep ACK interrupts */ 1426 ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER); 1427 ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR); 1428 1429 ast_udc_write(udc, 0, AST_UDC_EP0_CTRL); 1430 } 1431 1432 static int ast_udc_remove(struct platform_device *pdev) 1433 { 1434 struct ast_udc_dev *udc = platform_get_drvdata(pdev); 1435 unsigned long flags; 1436 u32 ctrl; 1437 1438 usb_del_gadget_udc(&udc->gadget); 1439 if (udc->driver) 1440 return -EBUSY; 1441 1442 spin_lock_irqsave(&udc->lock, flags); 1443 1444 /* Disable upstream port connection */ 1445 ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN; 1446 ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL); 1447 1448 clk_disable_unprepare(udc->clk); 1449 1450 spin_unlock_irqrestore(&udc->lock, flags); 1451 1452 if (udc->ep0_buf) 1453 dma_free_coherent(&pdev->dev, 1454 AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS, 1455 udc->ep0_buf, 1456 udc->ep0_buf_dma); 1457 1458 udc->ep0_buf = NULL; 1459 1460 return 0; 1461 } 1462 1463 static int ast_udc_probe(struct platform_device *pdev) 1464 { 1465 enum usb_device_speed max_speed; 1466 struct device *dev = &pdev->dev; 1467 struct ast_udc_dev *udc; 1468 struct resource *res; 1469 int rc; 1470 1471 udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL); 1472 if (!udc) 1473 return -ENOMEM; 1474 1475 udc->gadget.dev.parent = dev; 1476 udc->pdev = pdev; 1477 spin_lock_init(&udc->lock); 1478 1479 udc->gadget.ops = &ast_udc_ops; 1480 udc->gadget.ep0 = &udc->ep[0].ep; 1481 udc->gadget.name = "aspeed-udc"; 1482 udc->gadget.dev.init_name = "gadget"; 1483 
1484 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1485 udc->reg = devm_ioremap_resource(&pdev->dev, res); 1486 if (IS_ERR(udc->reg)) { 1487 dev_err(&pdev->dev, "Failed to map resources\n"); 1488 return PTR_ERR(udc->reg); 1489 } 1490 1491 platform_set_drvdata(pdev, udc); 1492 1493 udc->clk = devm_clk_get(&pdev->dev, NULL); 1494 if (IS_ERR(udc->clk)) { 1495 rc = PTR_ERR(udc->clk); 1496 goto err; 1497 } 1498 rc = clk_prepare_enable(udc->clk); 1499 if (rc) { 1500 dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc); 1501 goto err; 1502 } 1503 1504 /* Check if we need to limit the HW to USB1 */ 1505 max_speed = usb_get_maximum_speed(&pdev->dev); 1506 if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH) 1507 udc->force_usb1 = true; 1508 1509 /* 1510 * Allocate DMA buffers for all EPs in one chunk 1511 */ 1512 udc->ep0_buf = dma_alloc_coherent(&pdev->dev, 1513 AST_UDC_EP_DMA_SIZE * 1514 AST_UDC_NUM_ENDPOINTS, 1515 &udc->ep0_buf_dma, GFP_KERNEL); 1516 1517 udc->gadget.speed = USB_SPEED_UNKNOWN; 1518 udc->gadget.max_speed = USB_SPEED_HIGH; 1519 udc->creq = udc->reg + AST_UDC_SETUP0; 1520 1521 /* 1522 * Support single stage mode or 32/256 stages descriptor mode. 1523 * Set default as Descriptor Mode. 1524 */ 1525 udc->desc_mode = AST_UDC_DESC_MODE; 1526 1527 dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ? 
1528 "descriptor mode" : "single mode"); 1529 1530 INIT_LIST_HEAD(&udc->gadget.ep_list); 1531 INIT_LIST_HEAD(&udc->gadget.ep0->ep_list); 1532 1533 /* Initialized udc ep */ 1534 ast_udc_init_ep(udc); 1535 1536 /* Initialized udc device */ 1537 ast_udc_init_dev(udc); 1538 1539 /* Initialized udc hardware */ 1540 ast_udc_init_hw(udc); 1541 1542 /* Find interrupt and install handler */ 1543 udc->irq = platform_get_irq(pdev, 0); 1544 if (udc->irq < 0) { 1545 dev_err(&pdev->dev, "Failed to get interrupt\n"); 1546 rc = udc->irq; 1547 goto err; 1548 } 1549 1550 rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0, 1551 KBUILD_MODNAME, udc); 1552 if (rc) { 1553 dev_err(&pdev->dev, "Failed to request interrupt\n"); 1554 goto err; 1555 } 1556 1557 rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget); 1558 if (rc) { 1559 dev_err(&pdev->dev, "Failed to add gadget udc\n"); 1560 goto err; 1561 } 1562 1563 dev_info(&pdev->dev, "Initialized udc in USB%s mode\n", 1564 udc->force_usb1 ? "1" : "2"); 1565 1566 return 0; 1567 1568 err: 1569 dev_err(&pdev->dev, "Failed to udc probe, rc:0x%x\n", rc); 1570 ast_udc_remove(pdev); 1571 1572 return rc; 1573 } 1574 1575 static const struct of_device_id ast_udc_of_dt_ids[] = { 1576 { .compatible = "aspeed,ast2600-udc", }, 1577 {} 1578 }; 1579 1580 MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids); 1581 1582 static struct platform_driver ast_udc_driver = { 1583 .probe = ast_udc_probe, 1584 .remove = ast_udc_remove, 1585 .driver = { 1586 .name = KBUILD_MODNAME, 1587 .of_match_table = ast_udc_of_dt_ids, 1588 }, 1589 }; 1590 1591 module_platform_driver(ast_udc_driver); 1592 1593 MODULE_DESCRIPTION("ASPEED UDC driver"); 1594 MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>"); 1595 MODULE_LICENSE("GPL"); 1596