1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2011 Marvell International Ltd. All rights reserved. 4 * Author: Chao Xie <chao.xie@marvell.com> 5 * Neil Zhang <zhangwm@marvell.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/dmapool.h> 12 #include <linux/kernel.h> 13 #include <linux/delay.h> 14 #include <linux/ioport.h> 15 #include <linux/sched.h> 16 #include <linux/slab.h> 17 #include <linux/errno.h> 18 #include <linux/err.h> 19 #include <linux/timer.h> 20 #include <linux/list.h> 21 #include <linux/interrupt.h> 22 #include <linux/moduleparam.h> 23 #include <linux/device.h> 24 #include <linux/usb/ch9.h> 25 #include <linux/usb/gadget.h> 26 #include <linux/usb/otg.h> 27 #include <linux/pm.h> 28 #include <linux/io.h> 29 #include <linux/irq.h> 30 #include <linux/platform_device.h> 31 #include <linux/clk.h> 32 #include <linux/platform_data/mv_usb.h> 33 #include <asm/unaligned.h> 34 35 #include "mv_udc.h" 36 37 #define DRIVER_DESC "Marvell PXA USB Device Controller driver" 38 39 #define ep_dir(ep) (((ep)->ep_num == 0) ? \ 40 ((ep)->udc->ep0_dir) : ((ep)->direction)) 41 42 /* timeout value -- usec */ 43 #define RESET_TIMEOUT 10000 44 #define FLUSH_TIMEOUT 10000 45 #define EPSTATUS_TIMEOUT 10000 46 #define PRIME_TIMEOUT 10000 47 #define READSAFE_TIMEOUT 1000 48 49 #define LOOPS_USEC_SHIFT 1 50 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT) 51 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT) 52 53 static DECLARE_COMPLETION(release_done); 54 55 static const char driver_name[] = "mv_udc"; 56 57 static void nuke(struct mv_ep *ep, int status); 58 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver); 59 60 /* for endpoint 0 operations */ 61 static const struct usb_endpoint_descriptor mv_ep0_desc = { 62 .bLength = USB_DT_ENDPOINT_SIZE, 63 .bDescriptorType = USB_DT_ENDPOINT, 64 .bEndpointAddress = 0, 65 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 66 .wMaxPacketSize = EP0_MAX_PKT_SIZE, 67 }; 68 69 static void ep0_reset(struct mv_udc *udc) 70 { 71 struct mv_ep *ep; 72 u32 epctrlx; 73 int i = 0; 74 75 /* ep0 in and out */ 76 for (i = 0; i < 2; i++) { 77 ep = &udc->eps[i]; 78 ep->udc = udc; 79 80 /* ep0 dQH */ 81 ep->dqh = &udc->ep_dqh[i]; 82 83 /* configure ep0 endpoint capabilities in dQH */ 84 ep->dqh->max_packet_length = 85 (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) 86 | EP_QUEUE_HEAD_IOS; 87 88 ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; 89 90 epctrlx = readl(&udc->op_regs->epctrlx[0]); 91 if (i) { /* TX */ 92 epctrlx |= EPCTRL_TX_ENABLE 93 | (USB_ENDPOINT_XFER_CONTROL 94 << EPCTRL_TX_EP_TYPE_SHIFT); 95 96 } else { /* RX */ 97 epctrlx |= EPCTRL_RX_ENABLE 98 | (USB_ENDPOINT_XFER_CONTROL 99 << EPCTRL_RX_EP_TYPE_SHIFT); 100 } 101 102 writel(epctrlx, &udc->op_regs->epctrlx[0]); 103 } 104 } 105 106 /* protocol ep0 stall, will automatically be cleared on new transaction */ 107 static void ep0_stall(struct mv_udc *udc) 108 { 109 u32 epctrlx; 110 111 /* set TX and RX to stall */ 112 epctrlx = readl(&udc->op_regs->epctrlx[0]); 113 epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL; 114 writel(epctrlx, &udc->op_regs->epctrlx[0]); 115 116 /* update ep0 state */ 117 udc->ep0_state = WAIT_FOR_SETUP; 118 udc->ep0_dir = EP_DIR_OUT; 119 } 120 121 static int process_ep_req(struct mv_udc *udc, int index, 122 struct mv_req *curr_req) 123 { 124 struct mv_dtd *curr_dtd; 125 struct mv_dqh *curr_dqh; 126 int actual, remaining_length; 127 int i, direction; 128 int retval = 0; 129 u32 errors; 130 u32 bit_pos; 131 132 curr_dqh = &udc->ep_dqh[index]; 133 direction = index % 2; 134 135 curr_dtd = curr_req->head; 136 actual = curr_req->req.length; 137 138 for (i = 0; i < curr_req->dtd_count; i++) { 139 if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) { 140 dev_dbg(&udc->dev->dev, "%s, dTD not completed\n", 141 udc->eps[index].name); 142 return 1; 143 } 144 145 errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK; 146 if (!errors) { 147 remaining_length = 148 (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE) 149 >> DTD_LENGTH_BIT_POS; 150 actual -= remaining_length; 151 152 if (remaining_length) { 153 if (direction) { 154 dev_dbg(&udc->dev->dev, 155 "TX dTD remains data\n"); 156 retval = -EPROTO; 157 break; 158 } else 159 break; 160 } 161 } else { 162 dev_info(&udc->dev->dev, 163 "complete_tr error: ep=%d %s: error = 0x%x\n", 164 index >> 1, direction ? "SEND" : "RECV", 165 errors); 166 if (errors & DTD_STATUS_HALTED) { 167 /* Clear the errors and Halt condition */ 168 curr_dqh->size_ioc_int_sts &= ~errors; 169 retval = -EPIPE; 170 } else if (errors & DTD_STATUS_DATA_BUFF_ERR) { 171 retval = -EPROTO; 172 } else if (errors & DTD_STATUS_TRANSACTION_ERR) { 173 retval = -EILSEQ; 174 } 175 } 176 if (i != curr_req->dtd_count - 1) 177 curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt; 178 } 179 if (retval) 180 return retval; 181 182 if (direction == EP_DIR_OUT) 183 bit_pos = 1 << curr_req->ep->ep_num; 184 else 185 bit_pos = 1 << (16 + curr_req->ep->ep_num); 186 187 while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) { 188 if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) { 189 while (readl(&udc->op_regs->epstatus) & bit_pos) 190 udelay(1); 191 break; 192 } 193 udelay(1); 194 } 195 196 curr_req->req.actual = actual; 197 198 return 0; 199 } 200 201 /* 202 * done() - retire a request; caller blocked irqs 203 * @status : request status to be set, only works when 204 * request is still in progress. 205 */ 206 static void done(struct mv_ep *ep, struct mv_req *req, int status) 207 __releases(&ep->udc->lock) 208 __acquires(&ep->udc->lock) 209 { 210 struct mv_udc *udc = NULL; 211 unsigned char stopped = ep->stopped; 212 struct mv_dtd *curr_td, *next_td; 213 int j; 214 215 udc = (struct mv_udc *)ep->udc; 216 /* Removed the req from fsl_ep->queue */ 217 list_del_init(&req->queue); 218 219 /* req.status should be set as -EINPROGRESS in ep_queue() */ 220 if (req->req.status == -EINPROGRESS) 221 req->req.status = status; 222 else 223 status = req->req.status; 224 225 /* Free dtd for the request */ 226 next_td = req->head; 227 for (j = 0; j < req->dtd_count; j++) { 228 curr_td = next_td; 229 if (j != req->dtd_count - 1) 230 next_td = curr_td->next_dtd_virt; 231 dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma); 232 } 233 234 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); 235 236 if (status && (status != -ESHUTDOWN)) 237 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u", 238 ep->ep.name, &req->req, status, 239 req->req.actual, req->req.length); 240 241 ep->stopped = 1; 242 243 spin_unlock(&ep->udc->lock); 244 245 usb_gadget_giveback_request(&ep->ep, &req->req); 246 247 spin_lock(&ep->udc->lock); 248 ep->stopped = stopped; 249 } 250 251 static int queue_dtd(struct mv_ep *ep, struct mv_req *req) 252 { 253 struct mv_udc *udc; 254 struct mv_dqh *dqh; 255 u32 bit_pos, direction; 256 u32 usbcmd, epstatus; 257 unsigned int loops; 258 int retval = 0; 259 260 udc = ep->udc; 261 direction = ep_dir(ep); 262 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]); 263 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); 264 265 /* check if the pipe is empty */ 266 if (!(list_empty(&ep->queue))) { 267 struct mv_req *lastreq; 268 lastreq = list_entry(ep->queue.prev, struct mv_req, queue); 269 lastreq->tail->dtd_next = 270 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; 271 272 wmb(); 273 274 if (readl(&udc->op_regs->epprime) & bit_pos) 275 goto done; 276 277 loops = LOOPS(READSAFE_TIMEOUT); 278 while (1) { 279 /* start with setting the semaphores */ 280 usbcmd = readl(&udc->op_regs->usbcmd); 281 usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET; 282 writel(usbcmd, &udc->op_regs->usbcmd); 283 284 /* read the endpoint status */ 285 epstatus = readl(&udc->op_regs->epstatus) & bit_pos; 286 287 /* 288 * Reread the ATDTW semaphore bit to check if it is 289 * cleared. When hardware see a hazard, it will clear 290 * the bit or else we remain set to 1 and we can 291 * proceed with priming of endpoint if not already 292 * primed. 293 */ 294 if (readl(&udc->op_regs->usbcmd) 295 & USBCMD_ATDTW_TRIPWIRE_SET) 296 break; 297 298 loops--; 299 if (loops == 0) { 300 dev_err(&udc->dev->dev, 301 "Timeout for ATDTW_TRIPWIRE...\n"); 302 retval = -ETIME; 303 goto done; 304 } 305 udelay(LOOPS_USEC); 306 } 307 308 /* Clear the semaphore */ 309 usbcmd = readl(&udc->op_regs->usbcmd); 310 usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR; 311 writel(usbcmd, &udc->op_regs->usbcmd); 312 313 if (epstatus) 314 goto done; 315 } 316 317 /* Write dQH next pointer and terminate bit to 0 */ 318 dqh->next_dtd_ptr = req->head->td_dma 319 & EP_QUEUE_HEAD_NEXT_POINTER_MASK; 320 321 /* clear active and halt bit, in case set from a previous error */ 322 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); 323 324 /* Ensure that updates to the QH will occur before priming. */ 325 wmb(); 326 327 /* Prime the Endpoint */ 328 writel(bit_pos, &udc->op_regs->epprime); 329 330 done: 331 return retval; 332 } 333 334 static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, 335 dma_addr_t *dma, int *is_last) 336 { 337 struct mv_dtd *dtd; 338 struct mv_udc *udc; 339 struct mv_dqh *dqh; 340 u32 temp, mult = 0; 341 342 /* how big will this transfer be? */ 343 if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) { 344 dqh = req->ep->dqh; 345 mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS) 346 & 0x3; 347 *length = min(req->req.length - req->req.actual, 348 (unsigned)(mult * req->ep->ep.maxpacket)); 349 } else 350 *length = min(req->req.length - req->req.actual, 351 (unsigned)EP_MAX_LENGTH_TRANSFER); 352 353 udc = req->ep->udc; 354 355 /* 356 * Be careful that no _GFP_HIGHMEM is set, 357 * or we can not use dma_to_virt 358 */ 359 dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma); 360 if (dtd == NULL) 361 return dtd; 362 363 dtd->td_dma = *dma; 364 /* initialize buffer page pointers */ 365 temp = (u32)(req->req.dma + req->req.actual); 366 dtd->buff_ptr0 = cpu_to_le32(temp); 367 temp &= ~0xFFF; 368 dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000); 369 dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000); 370 dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000); 371 dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000); 372 373 req->req.actual += *length; 374 375 /* zlp is needed if req->req.zero is set */ 376 if (req->req.zero) { 377 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) 378 *is_last = 1; 379 else 380 *is_last = 0; 381 } else if (req->req.length == req->req.actual) 382 *is_last = 1; 383 else 384 *is_last = 0; 385 386 /* Fill in the transfer size; set active bit */ 387 temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE); 388 389 /* Enable interrupt for the last dtd of a request */ 390 if (*is_last && !req->req.no_interrupt) 391 temp |= DTD_IOC; 392 393 temp |= mult << 10; 394 395 dtd->size_ioc_sts = temp; 396 397 mb(); 398 399 return dtd; 400 } 401 402 /* generate dTD linked list for a request */ 403 static int req_to_dtd(struct mv_req *req) 404 { 405 unsigned count; 406 int is_last, is_first = 1; 407 struct mv_dtd *dtd, *last_dtd = NULL; 408 dma_addr_t dma; 409 410 do { 411 dtd = build_dtd(req, &count, &dma, &is_last); 412 if (dtd == NULL) 413 return -ENOMEM; 414 415 if (is_first) { 416 is_first = 0; 417 req->head = dtd; 418 } else { 419 last_dtd->dtd_next = dma; 420 last_dtd->next_dtd_virt = dtd; 421 } 422 last_dtd = dtd; 423 req->dtd_count++; 424 } while (!is_last); 425 426 /* set terminate bit to 1 for the last dTD */ 427 dtd->dtd_next = DTD_NEXT_TERMINATE; 428 429 req->tail = dtd; 430 431 return 0; 432 } 433 434 static int mv_ep_enable(struct usb_ep *_ep, 435 const struct usb_endpoint_descriptor *desc) 436 { 437 struct mv_udc *udc; 438 struct mv_ep *ep; 439 struct mv_dqh *dqh; 440 u16 max = 0; 441 u32 bit_pos, epctrlx, direction; 442 const unsigned char zlt = 1; 443 unsigned char ios, mult; 444 unsigned long flags; 445 446 ep = container_of(_ep, struct mv_ep, ep); 447 udc = ep->udc; 448 449 if (!_ep || !desc 450 || desc->bDescriptorType != USB_DT_ENDPOINT) 451 return -EINVAL; 452 453 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) 454 return -ESHUTDOWN; 455 456 direction = ep_dir(ep); 457 max = usb_endpoint_maxp(desc); 458 459 /* 460 * disable HW zero length termination select 461 * driver handles zero length packet through req->req.zero 462 */ 463 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); 464 465 /* Check if the Endpoint is Primed */ 466 if ((readl(&udc->op_regs->epprime) & bit_pos) 467 || (readl(&udc->op_regs->epstatus) & bit_pos)) { 468 dev_info(&udc->dev->dev, 469 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x," 470 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n", 471 (unsigned)ep->ep_num, direction ? "SEND" : "RECV", 472 (unsigned)readl(&udc->op_regs->epprime), 473 (unsigned)readl(&udc->op_regs->epstatus), 474 (unsigned)bit_pos); 475 goto en_done; 476 } 477 478 /* Set the max packet length, interrupt on Setup and Mult fields */ 479 ios = 0; 480 mult = 0; 481 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 482 case USB_ENDPOINT_XFER_BULK: 483 case USB_ENDPOINT_XFER_INT: 484 break; 485 case USB_ENDPOINT_XFER_CONTROL: 486 ios = 1; 487 break; 488 case USB_ENDPOINT_XFER_ISOC: 489 /* Calculate transactions needed for high bandwidth iso */ 490 mult = usb_endpoint_maxp_mult(desc); 491 /* 3 transactions at most */ 492 if (mult > 3) 493 goto en_done; 494 break; 495 default: 496 goto en_done; 497 } 498 499 spin_lock_irqsave(&udc->lock, flags); 500 /* Get the endpoint queue head address */ 501 dqh = ep->dqh; 502 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) 503 | (mult << EP_QUEUE_HEAD_MULT_POS) 504 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0) 505 | (ios ? EP_QUEUE_HEAD_IOS : 0); 506 dqh->next_dtd_ptr = 1; 507 dqh->size_ioc_int_sts = 0; 508 509 ep->ep.maxpacket = max; 510 ep->ep.desc = desc; 511 ep->stopped = 0; 512 513 /* Enable the endpoint for Rx or Tx and set the endpoint type */ 514 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); 515 if (direction == EP_DIR_IN) { 516 epctrlx &= ~EPCTRL_TX_ALL_MASK; 517 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST 518 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 519 << EPCTRL_TX_EP_TYPE_SHIFT); 520 } else { 521 epctrlx &= ~EPCTRL_RX_ALL_MASK; 522 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST 523 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 524 << EPCTRL_RX_EP_TYPE_SHIFT); 525 } 526 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); 527 528 /* 529 * Implement Guideline (GL# USB-7) The unused endpoint type must 530 * be programmed to bulk. 531 */ 532 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); 533 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) { 534 epctrlx |= (USB_ENDPOINT_XFER_BULK 535 << EPCTRL_RX_EP_TYPE_SHIFT); 536 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); 537 } 538 539 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); 540 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) { 541 epctrlx |= (USB_ENDPOINT_XFER_BULK 542 << EPCTRL_TX_EP_TYPE_SHIFT); 543 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); 544 } 545 546 spin_unlock_irqrestore(&udc->lock, flags); 547 548 return 0; 549 en_done: 550 return -EINVAL; 551 } 552 553 static int mv_ep_disable(struct usb_ep *_ep) 554 { 555 struct mv_udc *udc; 556 struct mv_ep *ep; 557 struct mv_dqh *dqh; 558 u32 epctrlx, direction; 559 unsigned long flags; 560 561 ep = container_of(_ep, struct mv_ep, ep); 562 if ((_ep == NULL) || !ep->ep.desc) 563 return -EINVAL; 564 565 udc = ep->udc; 566 567 /* Get the endpoint queue head address */ 568 dqh = ep->dqh; 569 570 spin_lock_irqsave(&udc->lock, flags); 571 572 direction = ep_dir(ep); 573 574 /* Reset the max packet length and the interrupt on Setup */ 575 dqh->max_packet_length = 0; 576 577 /* Disable the endpoint for Rx or Tx and reset the endpoint type */ 578 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); 579 epctrlx &= ~((direction == EP_DIR_IN) 580 ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE) 581 : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE)); 582 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); 583 584 /* nuke all pending requests (does flush) */ 585 nuke(ep, -ESHUTDOWN); 586 587 ep->ep.desc = NULL; 588 ep->stopped = 1; 589 590 spin_unlock_irqrestore(&udc->lock, flags); 591 592 return 0; 593 } 594 595 static struct usb_request * 596 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 597 { 598 struct mv_req *req = NULL; 599 600 req = kzalloc(sizeof *req, gfp_flags); 601 if (!req) 602 return NULL; 603 604 req->req.dma = DMA_ADDR_INVALID; 605 INIT_LIST_HEAD(&req->queue); 606 607 return &req->req; 608 } 609 610 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req) 611 { 612 struct mv_req *req = NULL; 613 614 req = container_of(_req, struct mv_req, req); 615 616 if (_req) 617 kfree(req); 618 } 619 620 static void mv_ep_fifo_flush(struct usb_ep *_ep) 621 { 622 struct mv_udc *udc; 623 u32 bit_pos, direction; 624 struct mv_ep *ep; 625 unsigned int loops; 626 627 if (!_ep) 628 return; 629 630 ep = container_of(_ep, struct mv_ep, ep); 631 if (!ep->ep.desc) 632 return; 633 634 udc = ep->udc; 635 direction = ep_dir(ep); 636 637 if (ep->ep_num == 0) 638 bit_pos = (1 << 16) | 1; 639 else if (direction == EP_DIR_OUT) 640 bit_pos = 1 << ep->ep_num; 641 else 642 bit_pos = 1 << (16 + ep->ep_num); 643 644 loops = LOOPS(EPSTATUS_TIMEOUT); 645 do { 646 unsigned int inter_loops; 647 648 if (loops == 0) { 649 dev_err(&udc->dev->dev, 650 "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n", 651 (unsigned)readl(&udc->op_regs->epstatus), 652 (unsigned)bit_pos); 653 return; 654 } 655 /* Write 1 to the Flush register */ 656 writel(bit_pos, &udc->op_regs->epflush); 657 658 /* Wait until flushing completed */ 659 inter_loops = LOOPS(FLUSH_TIMEOUT); 660 while (readl(&udc->op_regs->epflush)) { 661 /* 662 * ENDPTFLUSH bit should be cleared to indicate this 663 * operation is complete 664 */ 665 if (inter_loops == 0) { 666 dev_err(&udc->dev->dev, 667 "TIMEOUT for ENDPTFLUSH=0x%x," 668 "bit_pos=0x%x\n", 669 (unsigned)readl(&udc->op_regs->epflush), 670 (unsigned)bit_pos); 671 return; 672 } 673 inter_loops--; 674 udelay(LOOPS_USEC); 675 } 676 loops--; 677 } while (readl(&udc->op_regs->epstatus) & bit_pos); 678 } 679 680 /* queues (submits) an I/O request to an endpoint */ 681 static int 682 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 683 { 684 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); 685 struct mv_req *req = container_of(_req, struct mv_req, req); 686 struct mv_udc *udc = ep->udc; 687 unsigned long flags; 688 int retval; 689 690 /* catch various bogus parameters */ 691 if (!_req || !req->req.complete || !req->req.buf 692 || !list_empty(&req->queue)) { 693 dev_err(&udc->dev->dev, "%s, bad params", __func__); 694 return -EINVAL; 695 } 696 if (unlikely(!_ep || !ep->ep.desc)) { 697 dev_err(&udc->dev->dev, "%s, bad ep", __func__); 698 return -EINVAL; 699 } 700 701 udc = ep->udc; 702 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) 703 return -ESHUTDOWN; 704 705 req->ep = ep; 706 707 /* map virtual address to hardware */ 708 retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep)); 709 if (retval) 710 return retval; 711 712 req->req.status = -EINPROGRESS; 713 req->req.actual = 0; 714 req->dtd_count = 0; 715 716 spin_lock_irqsave(&udc->lock, flags); 717 718 /* build dtds and push them to device queue */ 719 if (!req_to_dtd(req)) { 720 retval = queue_dtd(ep, req); 721 if (retval) { 722 spin_unlock_irqrestore(&udc->lock, flags); 723 dev_err(&udc->dev->dev, "Failed to queue dtd\n"); 724 goto err_unmap_dma; 725 } 726 } else { 727 spin_unlock_irqrestore(&udc->lock, flags); 728 dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n"); 729 retval = -ENOMEM; 730 goto err_unmap_dma; 731 } 732 733 /* Update ep0 state */ 734 if (ep->ep_num == 0) 735 udc->ep0_state = DATA_STATE_XMIT; 736 737 /* irq handler advances the queue */ 738 list_add_tail(&req->queue, &ep->queue); 739 spin_unlock_irqrestore(&udc->lock, flags); 740 741 return 0; 742 743 err_unmap_dma: 744 usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep)); 745 746 return retval; 747 } 748 749 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req) 750 { 751 struct mv_dqh *dqh = ep->dqh; 752 u32 bit_pos; 753 754 /* Write dQH next pointer and terminate bit to 0 */ 755 dqh->next_dtd_ptr = req->head->td_dma 756 & EP_QUEUE_HEAD_NEXT_POINTER_MASK; 757 758 /* clear active and halt bit, in case set from a previous error */ 759 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); 760 761 /* Ensure that updates to the QH will occure before priming. */ 762 wmb(); 763 764 bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); 765 766 /* Prime the Endpoint */ 767 writel(bit_pos, &ep->udc->op_regs->epprime); 768 } 769 770 /* dequeues (cancels, unlinks) an I/O request from an endpoint */ 771 static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 772 { 773 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); 774 struct mv_req *req; 775 struct mv_udc *udc = ep->udc; 776 unsigned long flags; 777 int stopped, ret = 0; 778 u32 epctrlx; 779 780 if (!_ep || !_req) 781 return -EINVAL; 782 783 spin_lock_irqsave(&ep->udc->lock, flags); 784 stopped = ep->stopped; 785 786 /* Stop the ep before we deal with the queue */ 787 ep->stopped = 1; 788 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); 789 if (ep_dir(ep) == EP_DIR_IN) 790 epctrlx &= ~EPCTRL_TX_ENABLE; 791 else 792 epctrlx &= ~EPCTRL_RX_ENABLE; 793 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); 794 795 /* make sure it's actually queued on this endpoint */ 796 list_for_each_entry(req, &ep->queue, queue) { 797 if (&req->req == _req) 798 break; 799 } 800 if (&req->req != _req) { 801 ret = -EINVAL; 802 goto out; 803 } 804 805 /* The request is in progress, or completed but not dequeued */ 806 if (ep->queue.next == &req->queue) { 807 _req->status = -ECONNRESET; 808 mv_ep_fifo_flush(_ep); /* flush current transfer */ 809 810 /* The request isn't the last request in this ep queue */ 811 if (req->queue.next != &ep->queue) { 812 struct mv_req *next_req; 813 814 next_req = list_entry(req->queue.next, 815 struct mv_req, queue); 816 817 /* Point the QH to the first TD of next request */ 818 mv_prime_ep(ep, next_req); 819 } else { 820 struct mv_dqh *qh; 821 822 qh = ep->dqh; 823 qh->next_dtd_ptr = 1; 824 qh->size_ioc_int_sts = 0; 825 } 826 827 /* The request hasn't been processed, patch up the TD chain */ 828 } else { 829 struct mv_req *prev_req; 830 831 prev_req = list_entry(req->queue.prev, struct mv_req, queue); 832 writel(readl(&req->tail->dtd_next), 833 &prev_req->tail->dtd_next); 834 835 } 836 837 done(ep, req, -ECONNRESET); 838 839 /* Enable EP */ 840 out: 841 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); 842 if (ep_dir(ep) == EP_DIR_IN) 843 epctrlx |= EPCTRL_TX_ENABLE; 844 else 845 epctrlx |= EPCTRL_RX_ENABLE; 846 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); 847 ep->stopped = stopped; 848 849 spin_unlock_irqrestore(&ep->udc->lock, flags); 850 return ret; 851 } 852 853 static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall) 854 { 855 u32 epctrlx; 856 857 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); 858 859 if (stall) { 860 if (direction == EP_DIR_IN) 861 epctrlx |= EPCTRL_TX_EP_STALL; 862 else 863 epctrlx |= EPCTRL_RX_EP_STALL; 864 } else { 865 if (direction == EP_DIR_IN) { 866 epctrlx &= ~EPCTRL_TX_EP_STALL; 867 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST; 868 } else { 869 epctrlx &= ~EPCTRL_RX_EP_STALL; 870 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST; 871 } 872 } 873 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]); 874 } 875 876 static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction) 877 { 878 u32 epctrlx; 879 880 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); 881 882 if (direction == EP_DIR_OUT) 883 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0; 884 else 885 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0; 886 } 887 888 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) 889 { 890 struct mv_ep *ep; 891 unsigned long flags; 892 int status = 0; 893 struct mv_udc *udc; 894 895 ep = container_of(_ep, struct mv_ep, ep); 896 udc = ep->udc; 897 if (!_ep || !ep->ep.desc) { 898 status = -EINVAL; 899 goto out; 900 } 901 902 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 903 status = -EOPNOTSUPP; 904 goto out; 905 } 906 907 /* 908 * Attempt to halt IN ep will fail if any transfer requests 909 * are still queue 910 */ 911 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) { 912 status = -EAGAIN; 913 goto out; 914 } 915 916 spin_lock_irqsave(&ep->udc->lock, flags); 917 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt); 918 if (halt && wedge) 919 ep->wedge = 1; 920 else if (!halt) 921 ep->wedge = 0; 922 spin_unlock_irqrestore(&ep->udc->lock, flags); 923 924 if (ep->ep_num == 0) { 925 udc->ep0_state = WAIT_FOR_SETUP; 926 udc->ep0_dir = EP_DIR_OUT; 927 } 928 out: 929 return status; 930 } 931 932 static int mv_ep_set_halt(struct usb_ep *_ep, int halt) 933 { 934 return mv_ep_set_halt_wedge(_ep, halt, 0); 935 } 936 937 static int mv_ep_set_wedge(struct usb_ep *_ep) 938 { 939 return mv_ep_set_halt_wedge(_ep, 1, 1); 940 } 941 942 static const struct usb_ep_ops mv_ep_ops = { 943 .enable = mv_ep_enable, 944 .disable = mv_ep_disable, 945 946 .alloc_request = mv_alloc_request, 947 .free_request = mv_free_request, 948 949 .queue = mv_ep_queue, 950 .dequeue = mv_ep_dequeue, 951 952 .set_wedge = mv_ep_set_wedge, 953 .set_halt = mv_ep_set_halt, 954 .fifo_flush = mv_ep_fifo_flush, /* flush fifo */ 955 }; 956 957 static int udc_clock_enable(struct mv_udc *udc) 958 { 959 return clk_prepare_enable(udc->clk); 960 } 961 962 static void udc_clock_disable(struct mv_udc *udc) 963 { 964 clk_disable_unprepare(udc->clk); 965 } 966 967 static void udc_stop(struct mv_udc *udc) 968 { 969 u32 tmp; 970 971 /* Disable interrupts */ 972 tmp = readl(&udc->op_regs->usbintr); 973 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN | 974 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN); 975 writel(tmp, &udc->op_regs->usbintr); 976 977 udc->stopped = 1; 978 979 /* Reset the Run the bit in the command register to stop VUSB */ 980 tmp = readl(&udc->op_regs->usbcmd); 981 tmp &= ~USBCMD_RUN_STOP; 982 writel(tmp, &udc->op_regs->usbcmd); 983 } 984 985 static void udc_start(struct mv_udc *udc) 986 { 987 u32 usbintr; 988 989 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN 990 | USBINTR_PORT_CHANGE_DETECT_EN 991 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND; 992 /* Enable interrupts */ 993 writel(usbintr, &udc->op_regs->usbintr); 994 995 udc->stopped = 0; 996 997 /* Set the Run bit in the command register */ 998 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd); 999 } 1000 1001 static int udc_reset(struct mv_udc *udc) 1002 { 1003 unsigned int loops; 1004 u32 tmp, portsc; 1005 1006 /* Stop the controller */ 1007 tmp = readl(&udc->op_regs->usbcmd); 1008 tmp &= ~USBCMD_RUN_STOP; 1009 writel(tmp, &udc->op_regs->usbcmd); 1010 1011 /* Reset the controller to get default values */ 1012 writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd); 1013 1014 /* wait for reset to complete */ 1015 loops = LOOPS(RESET_TIMEOUT); 1016 while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) { 1017 if (loops == 0) { 1018 dev_err(&udc->dev->dev, 1019 "Wait for RESET completed TIMEOUT\n"); 1020 return -ETIMEDOUT; 1021 } 1022 loops--; 1023 udelay(LOOPS_USEC); 1024 } 1025 1026 /* set controller to device mode */ 1027 tmp = readl(&udc->op_regs->usbmode); 1028 tmp |= USBMODE_CTRL_MODE_DEVICE; 1029 1030 /* turn setup lockout off, require setup tripwire in usbcmd */ 1031 tmp |= USBMODE_SETUP_LOCK_OFF; 1032 1033 writel(tmp, &udc->op_regs->usbmode); 1034 1035 writel(0x0, &udc->op_regs->epsetupstat); 1036 1037 /* Configure the Endpoint List Address */ 1038 writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK, 1039 &udc->op_regs->eplistaddr); 1040 1041 portsc = readl(&udc->op_regs->portsc[0]); 1042 if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC) 1043 portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER); 1044 1045 if (udc->force_fs) 1046 portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT; 1047 else 1048 portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT); 1049 1050 writel(portsc, &udc->op_regs->portsc[0]); 1051 1052 tmp = readl(&udc->op_regs->epctrlx[0]); 1053 tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL); 1054 writel(tmp, &udc->op_regs->epctrlx[0]); 1055 1056 return 0; 1057 } 1058 1059 static int mv_udc_enable_internal(struct mv_udc *udc) 1060 { 1061 int retval; 1062 1063 if (udc->active) 1064 return 0; 1065 1066 dev_dbg(&udc->dev->dev, "enable udc\n"); 1067 retval = udc_clock_enable(udc); 1068 if (retval) 1069 return retval; 1070 1071 if (udc->pdata->phy_init) { 1072 retval = udc->pdata->phy_init(udc->phy_regs); 1073 if (retval) { 1074 dev_err(&udc->dev->dev, 1075 "init phy error %d\n", retval); 1076 udc_clock_disable(udc); 1077 return retval; 1078 } 1079 } 1080 udc->active = 1; 1081 1082 return 0; 1083 } 1084 1085 static int mv_udc_enable(struct mv_udc *udc) 1086 { 1087 if (udc->clock_gating) 1088 return mv_udc_enable_internal(udc); 1089 1090 return 0; 1091 } 1092 1093 static void mv_udc_disable_internal(struct mv_udc *udc) 1094 { 1095 if (udc->active) { 1096 dev_dbg(&udc->dev->dev, "disable udc\n"); 1097 if (udc->pdata->phy_deinit) 1098 udc->pdata->phy_deinit(udc->phy_regs); 1099 udc_clock_disable(udc); 1100 udc->active = 0; 1101 } 1102 } 1103 1104 static void mv_udc_disable(struct mv_udc *udc) 1105 { 1106 if (udc->clock_gating) 1107 mv_udc_disable_internal(udc); 1108 } 1109 1110 static int mv_udc_get_frame(struct usb_gadget *gadget) 1111 { 1112 struct mv_udc *udc; 1113 u16 retval; 1114 1115 if (!gadget) 1116 return -ENODEV; 1117 1118 udc = container_of(gadget, struct mv_udc, gadget); 1119 1120 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS; 1121 1122 return retval; 1123 } 1124 1125 /* Tries to wake up the host connected to this gadget */ 1126 static int mv_udc_wakeup(struct usb_gadget *gadget) 1127 { 1128 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget); 1129 u32 portsc; 1130 1131 /* Remote wakeup feature not enabled by host */ 1132 if (!udc->remote_wakeup) 1133 return -ENOTSUPP; 1134 1135 portsc = readl(&udc->op_regs->portsc); 1136 /* not suspended? */ 1137 if (!(portsc & PORTSCX_PORT_SUSPEND)) 1138 return 0; 1139 /* trigger force resume */ 1140 portsc |= PORTSCX_PORT_FORCE_RESUME; 1141 writel(portsc, &udc->op_regs->portsc[0]); 1142 return 0; 1143 } 1144 1145 static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active) 1146 { 1147 struct mv_udc *udc; 1148 unsigned long flags; 1149 int retval = 0; 1150 1151 udc = container_of(gadget, struct mv_udc, gadget); 1152 spin_lock_irqsave(&udc->lock, flags); 1153 1154 udc->vbus_active = (is_active != 0); 1155 1156 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", 1157 __func__, udc->softconnect, udc->vbus_active); 1158 1159 if (udc->driver && udc->softconnect && udc->vbus_active) { 1160 retval = mv_udc_enable(udc); 1161 if (retval == 0) { 1162 /* Clock is disabled, need re-init registers */ 1163 udc_reset(udc); 1164 ep0_reset(udc); 1165 udc_start(udc); 1166 } 1167 } else if (udc->driver && udc->softconnect) { 1168 if (!udc->active) 1169 goto out; 1170 1171 /* stop all the transfer in queue*/ 1172 stop_activity(udc, udc->driver); 1173 udc_stop(udc); 1174 mv_udc_disable(udc); 1175 } 1176 1177 out: 1178 spin_unlock_irqrestore(&udc->lock, flags); 1179 return retval; 1180 } 1181 1182 static int mv_udc_pullup(struct usb_gadget *gadget, int is_on) 1183 { 1184 struct mv_udc *udc; 1185 unsigned long flags; 1186 int retval = 0; 1187 1188 udc = container_of(gadget, struct mv_udc, gadget); 1189 spin_lock_irqsave(&udc->lock, flags); 1190 1191 udc->softconnect = (is_on != 0); 1192 1193 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", 1194 __func__, udc->softconnect, udc->vbus_active); 1195 1196 if (udc->driver && udc->softconnect && udc->vbus_active) { 1197 retval = mv_udc_enable(udc); 1198 if (retval == 0) { 1199 /* Clock is disabled, need re-init registers */ 1200 udc_reset(udc); 1201 ep0_reset(udc); 1202 udc_start(udc); 1203 } 1204 } else if (udc->driver && udc->vbus_active) { 1205 /* stop all the transfer in queue*/ 1206 stop_activity(udc, udc->driver); 1207 udc_stop(udc); 1208 mv_udc_disable(udc); 1209 } 1210 1211 spin_unlock_irqrestore(&udc->lock, flags); 1212 return retval; 1213 } 1214 1215 static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *); 1216 static int mv_udc_stop(struct usb_gadget *); 1217 /* device controller usb_gadget_ops structure */ 1218 static const struct usb_gadget_ops mv_ops = { 1219 1220 /* returns the current frame number */ 1221 .get_frame = mv_udc_get_frame, 1222 1223 /* tries to wake up the host connected to this gadget */ 1224 .wakeup = mv_udc_wakeup, 1225 1226 /* notify controller that VBUS is powered or not */ 1227 .vbus_session = mv_udc_vbus_session, 1228 1229 /* D+ pullup, software-controlled connect/disconnect to USB host */ 1230 .pullup = mv_udc_pullup, 1231 .udc_start = mv_udc_start, 1232 .udc_stop = mv_udc_stop, 1233 }; 1234 1235 static int eps_init(struct mv_udc *udc) 1236 { 1237 struct mv_ep *ep; 1238 char name[14]; 1239 int i; 1240 1241 /* initialize ep0 */ 1242 ep = &udc->eps[0]; 1243 ep->udc = udc; 1244 strncpy(ep->name, "ep0", sizeof(ep->name)); 1245 ep->ep.name = ep->name; 1246 ep->ep.ops = &mv_ep_ops; 1247 ep->wedge = 0; 1248 ep->stopped = 0; 1249 usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE); 1250 ep->ep.caps.type_control = true; 1251 ep->ep.caps.dir_in = true; 1252 ep->ep.caps.dir_out = true; 1253 ep->ep_num = 0; 1254 ep->ep.desc = &mv_ep0_desc; 1255 INIT_LIST_HEAD(&ep->queue); 1256 1257 ep->ep_type = USB_ENDPOINT_XFER_CONTROL; 1258 1259 /* initialize other endpoints */ 1260 for (i = 2; i < udc->max_eps * 2; i++) { 1261 ep = &udc->eps[i]; 1262 if (i % 2) { 1263 snprintf(name, sizeof(name), "ep%din", i / 2); 1264 ep->direction = EP_DIR_IN; 1265 ep->ep.caps.dir_in = true; 1266 } else { 1267 snprintf(name, sizeof(name), "ep%dout", i / 2); 1268 ep->direction = EP_DIR_OUT; 1269 ep->ep.caps.dir_out = true; 1270 } 1271 ep->udc = udc; 1272 strncpy(ep->name, name, sizeof(ep->name)); 1273 ep->ep.name = ep->name; 1274 1275 ep->ep.caps.type_iso = true; 1276 ep->ep.caps.type_bulk = true; 1277 ep->ep.caps.type_int = true; 1278 1279 ep->ep.ops = &mv_ep_ops; 1280 ep->stopped = 0; 1281 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); 1282 ep->ep_num = i / 2; 1283 1284 INIT_LIST_HEAD(&ep->queue); 1285 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); 1286 1287 ep->dqh = &udc->ep_dqh[i]; 1288 } 1289 1290 return 0; 1291 } 1292 1293 /* delete all endpoint requests, called with spinlock held */ 1294 static void nuke(struct mv_ep *ep, int status) 1295 { 1296 /* called with spinlock held */ 1297 ep->stopped = 1; 1298 1299 /* endpoint fifo flush */ 1300 mv_ep_fifo_flush(&ep->ep); 1301 1302 while (!list_empty(&ep->queue)) { 1303 struct mv_req *req = NULL; 1304 req = list_entry(ep->queue.next, struct mv_req, queue); 1305 done(ep, req, status); 1306 } 1307 } 1308 1309 static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver) 1310 { 1311 struct mv_ep *ep; 1312 1313 nuke(&udc->eps[0], -ESHUTDOWN); 1314 1315 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { 1316 nuke(ep, -ESHUTDOWN); 1317 } 1318 1319 /* report reset; the driver is already quiesced */ 1320 if (driver) { 1321 spin_unlock(&udc->lock); 1322 usb_gadget_udc_reset(&udc->gadget, driver); 1323 spin_lock(&udc->lock); 1324 } 1325 } 1326 /* stop all USB activities */ 1327 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver) 1328 { 1329 struct mv_ep *ep; 1330 1331 nuke(&udc->eps[0], -ESHUTDOWN); 1332 1333 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { 1334 nuke(ep, -ESHUTDOWN); 1335 } 1336 1337 /* report disconnect; the driver is already quiesced */ 1338 if (driver) { 1339 spin_unlock(&udc->lock); 1340 driver->disconnect(&udc->gadget); 1341 spin_lock(&udc->lock); 1342 } 1343 } 1344 1345 static int mv_udc_start(struct usb_gadget *gadget, 1346 struct usb_gadget_driver *driver) 1347 { 1348 struct mv_udc *udc; 1349 int retval = 0; 1350 unsigned long flags; 1351 1352 udc = container_of(gadget, struct mv_udc, gadget); 1353 1354 if (udc->driver) 1355 return -EBUSY; 1356 1357 spin_lock_irqsave(&udc->lock, flags); 1358 1359 /* hook up the driver ... */ 1360 driver->driver.bus = NULL; 1361 udc->driver = driver; 1362 1363 udc->usb_state = USB_STATE_ATTACHED; 1364 udc->ep0_state = WAIT_FOR_SETUP; 1365 udc->ep0_dir = EP_DIR_OUT; 1366 1367 spin_unlock_irqrestore(&udc->lock, flags); 1368 1369 if (udc->transceiver) { 1370 retval = otg_set_peripheral(udc->transceiver->otg, 1371 &udc->gadget); 1372 if (retval) { 1373 dev_err(&udc->dev->dev, 1374 "unable to register peripheral to otg\n"); 1375 udc->driver = NULL; 1376 return retval; 1377 } 1378 } 1379 1380 /* When boot with cable attached, there will be no vbus irq occurred */ 1381 if (udc->qwork) 1382 queue_work(udc->qwork, &udc->vbus_work); 1383 1384 return 0; 1385 } 1386 1387 static int mv_udc_stop(struct usb_gadget *gadget) 1388 { 1389 struct mv_udc *udc; 1390 unsigned long flags; 1391 1392 udc = container_of(gadget, struct mv_udc, gadget); 1393 1394 spin_lock_irqsave(&udc->lock, flags); 1395 1396 mv_udc_enable(udc); 1397 udc_stop(udc); 1398 1399 /* stop all usb activities */ 1400 udc->gadget.speed = USB_SPEED_UNKNOWN; 1401 stop_activity(udc, NULL); 1402 mv_udc_disable(udc); 1403 1404 spin_unlock_irqrestore(&udc->lock, flags); 1405 1406 /* unbind gadget driver */ 1407 udc->driver = NULL; 1408 1409 return 0; 1410 } 1411 1412 static void mv_set_ptc(struct mv_udc *udc, u32 mode) 1413 { 1414 u32 portsc; 1415 1416 portsc = readl(&udc->op_regs->portsc[0]); 1417 portsc |= mode << 16; 1418 writel(portsc, &udc->op_regs->portsc[0]); 1419 } 1420 1421 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req) 1422 { 1423 struct mv_ep *mvep = container_of(ep, struct mv_ep, ep); 1424 struct mv_req *req = container_of(_req, struct mv_req, req); 1425 struct mv_udc *udc; 1426 unsigned long flags; 1427 1428 udc = mvep->udc; 1429 1430 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode); 1431 1432 spin_lock_irqsave(&udc->lock, flags); 1433 if (req->test_mode) { 1434 mv_set_ptc(udc, req->test_mode); 1435 req->test_mode = 0; 1436 } 1437 spin_unlock_irqrestore(&udc->lock, flags); 1438 } 1439 1440 static int 1441 udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty) 1442 { 1443 int retval = 0; 1444 struct mv_req *req; 1445 struct mv_ep *ep; 1446 1447 ep = &udc->eps[0]; 1448 udc->ep0_dir = direction; 1449 udc->ep0_state = WAIT_FOR_OUT_STATUS; 1450 1451 req = udc->status_req; 1452 1453 /* fill in the reqest structure */ 1454 if (empty == false) { 1455 *((u16 *) req->req.buf) = cpu_to_le16(status); 1456 req->req.length = 2; 1457 } else 1458 req->req.length = 0; 1459 1460 req->ep = ep; 1461 req->req.status = -EINPROGRESS; 1462 req->req.actual = 0; 1463 if (udc->test_mode) { 1464 req->req.complete = prime_status_complete; 1465 req->test_mode = udc->test_mode; 1466 udc->test_mode = 0; 1467 } else 1468 req->req.complete = NULL; 1469 req->dtd_count = 0; 1470 1471 if (req->req.dma == DMA_ADDR_INVALID) { 1472 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, 1473 req->req.buf, req->req.length, 1474 ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1475 req->mapped = 1; 1476 } 1477 1478 /* prime the data phase */ 1479 if (!req_to_dtd(req)) { 1480 retval = queue_dtd(ep, req); 1481 if (retval) { 1482 dev_err(&udc->dev->dev, 1483 "Failed to queue dtd when prime status\n"); 1484 goto out; 1485 } 1486 } else{ /* no mem */ 1487 retval = -ENOMEM; 1488 dev_err(&udc->dev->dev, 1489 "Failed to dma_pool_alloc when prime status\n"); 1490 goto out; 1491 } 1492 1493 list_add_tail(&req->queue, &ep->queue); 1494 1495 return 0; 1496 out: 1497 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); 1498 1499 return retval; 1500 } 1501 1502 static void mv_udc_testmode(struct mv_udc *udc, u16 index) 1503 { 1504 if (index <= USB_TEST_FORCE_ENABLE) { 1505 udc->test_mode = index; 1506 if (udc_prime_status(udc, EP_DIR_IN, 0, true)) 1507 ep0_stall(udc); 1508 } else 1509 dev_err(&udc->dev->dev, 1510 "This test mode(%d) is not supported\n", index); 1511 } 1512 1513 static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup) 1514 { 1515 udc->dev_addr = (u8)setup->wValue; 1516 1517 /* update usb state */ 1518 udc->usb_state = USB_STATE_ADDRESS; 1519 1520 if (udc_prime_status(udc, EP_DIR_IN, 0, true)) 1521 ep0_stall(udc); 1522 } 1523 1524 static void ch9getstatus(struct mv_udc *udc, u8 ep_num, 1525 struct usb_ctrlrequest *setup) 1526 { 1527 u16 status = 0; 1528 int retval; 1529 1530 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) 1531 != (USB_DIR_IN | USB_TYPE_STANDARD)) 1532 return; 1533 1534 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) { 1535 status = 1 << USB_DEVICE_SELF_POWERED; 1536 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP; 1537 } else if ((setup->bRequestType & USB_RECIP_MASK) 1538 == USB_RECIP_INTERFACE) { 1539 /* get interface status */ 1540 status = 0; 1541 } else if ((setup->bRequestType & USB_RECIP_MASK) 1542 == USB_RECIP_ENDPOINT) { 1543 u8 ep_num, direction; 1544 1545 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; 1546 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) 1547 ? EP_DIR_IN : EP_DIR_OUT; 1548 status = ep_is_stall(udc, ep_num, direction) 1549 << USB_ENDPOINT_HALT; 1550 } 1551 1552 retval = udc_prime_status(udc, EP_DIR_IN, status, false); 1553 if (retval) 1554 ep0_stall(udc); 1555 else 1556 udc->ep0_state = DATA_STATE_XMIT; 1557 } 1558 1559 static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) 1560 { 1561 u8 ep_num; 1562 u8 direction; 1563 struct mv_ep *ep; 1564 1565 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) 1566 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { 1567 switch (setup->wValue) { 1568 case USB_DEVICE_REMOTE_WAKEUP: 1569 udc->remote_wakeup = 0; 1570 break; 1571 default: 1572 goto out; 1573 } 1574 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) 1575 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { 1576 switch (setup->wValue) { 1577 case USB_ENDPOINT_HALT: 1578 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; 1579 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) 1580 ? EP_DIR_IN : EP_DIR_OUT; 1581 if (setup->wValue != 0 || setup->wLength != 0 1582 || ep_num > udc->max_eps) 1583 goto out; 1584 ep = &udc->eps[ep_num * 2 + direction]; 1585 if (ep->wedge == 1) 1586 break; 1587 spin_unlock(&udc->lock); 1588 ep_set_stall(udc, ep_num, direction, 0); 1589 spin_lock(&udc->lock); 1590 break; 1591 default: 1592 goto out; 1593 } 1594 } else 1595 goto out; 1596 1597 if (udc_prime_status(udc, EP_DIR_IN, 0, true)) 1598 ep0_stall(udc); 1599 out: 1600 return; 1601 } 1602 1603 static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) 1604 { 1605 u8 ep_num; 1606 u8 direction; 1607 1608 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) 1609 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { 1610 switch (setup->wValue) { 1611 case USB_DEVICE_REMOTE_WAKEUP: 1612 udc->remote_wakeup = 1; 1613 break; 1614 case USB_DEVICE_TEST_MODE: 1615 if (setup->wIndex & 0xFF 1616 || udc->gadget.speed != USB_SPEED_HIGH) 1617 ep0_stall(udc); 1618 1619 if (udc->usb_state != USB_STATE_CONFIGURED 1620 && udc->usb_state != USB_STATE_ADDRESS 1621 && udc->usb_state != USB_STATE_DEFAULT) 1622 ep0_stall(udc); 1623 1624 mv_udc_testmode(udc, (setup->wIndex >> 8)); 1625 goto out; 1626 default: 1627 goto out; 1628 } 1629 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) 1630 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { 1631 switch (setup->wValue) { 1632 case USB_ENDPOINT_HALT: 1633 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; 1634 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) 1635 ? EP_DIR_IN : EP_DIR_OUT; 1636 if (setup->wValue != 0 || setup->wLength != 0 1637 || ep_num > udc->max_eps) 1638 goto out; 1639 spin_unlock(&udc->lock); 1640 ep_set_stall(udc, ep_num, direction, 1); 1641 spin_lock(&udc->lock); 1642 break; 1643 default: 1644 goto out; 1645 } 1646 } else 1647 goto out; 1648 1649 if (udc_prime_status(udc, EP_DIR_IN, 0, true)) 1650 ep0_stall(udc); 1651 out: 1652 return; 1653 } 1654 1655 static void handle_setup_packet(struct mv_udc *udc, u8 ep_num, 1656 struct usb_ctrlrequest *setup) 1657 __releases(&ep->udc->lock) 1658 __acquires(&ep->udc->lock) 1659 { 1660 bool delegate = false; 1661 1662 nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN); 1663 1664 dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1665 setup->bRequestType, setup->bRequest, 1666 setup->wValue, setup->wIndex, setup->wLength); 1667 /* We process some standard setup requests here */ 1668 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1669 switch (setup->bRequest) { 1670 case USB_REQ_GET_STATUS: 1671 ch9getstatus(udc, ep_num, setup); 1672 break; 1673 1674 case USB_REQ_SET_ADDRESS: 1675 ch9setaddress(udc, setup); 1676 break; 1677 1678 case USB_REQ_CLEAR_FEATURE: 1679 ch9clearfeature(udc, setup); 1680 break; 1681 1682 case USB_REQ_SET_FEATURE: 1683 ch9setfeature(udc, setup); 1684 break; 1685 1686 default: 1687 delegate = true; 1688 } 1689 } else 1690 delegate = true; 1691 1692 /* delegate USB standard requests to the gadget driver */ 1693 if (delegate == true) { 1694 /* USB requests handled by gadget */ 1695 if (setup->wLength) { 1696 /* DATA phase from gadget, STATUS phase from udc */ 1697 udc->ep0_dir = (setup->bRequestType & USB_DIR_IN) 1698 ? EP_DIR_IN : EP_DIR_OUT; 1699 spin_unlock(&udc->lock); 1700 if (udc->driver->setup(&udc->gadget, 1701 &udc->local_setup_buff) < 0) 1702 ep0_stall(udc); 1703 spin_lock(&udc->lock); 1704 udc->ep0_state = (setup->bRequestType & USB_DIR_IN) 1705 ? DATA_STATE_XMIT : DATA_STATE_RECV; 1706 } else { 1707 /* no DATA phase, IN STATUS phase from gadget */ 1708 udc->ep0_dir = EP_DIR_IN; 1709 spin_unlock(&udc->lock); 1710 if (udc->driver->setup(&udc->gadget, 1711 &udc->local_setup_buff) < 0) 1712 ep0_stall(udc); 1713 spin_lock(&udc->lock); 1714 udc->ep0_state = WAIT_FOR_OUT_STATUS; 1715 } 1716 } 1717 } 1718 1719 /* complete DATA or STATUS phase of ep0 prime status phase if needed */ 1720 static void ep0_req_complete(struct mv_udc *udc, 1721 struct mv_ep *ep0, struct mv_req *req) 1722 { 1723 u32 new_addr; 1724 1725 if (udc->usb_state == USB_STATE_ADDRESS) { 1726 /* set the new address */ 1727 new_addr = (u32)udc->dev_addr; 1728 writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT, 1729 &udc->op_regs->deviceaddr); 1730 } 1731 1732 done(ep0, req, 0); 1733 1734 switch (udc->ep0_state) { 1735 case DATA_STATE_XMIT: 1736 /* receive status phase */ 1737 if (udc_prime_status(udc, EP_DIR_OUT, 0, true)) 1738 ep0_stall(udc); 1739 break; 1740 case DATA_STATE_RECV: 1741 /* send status phase */ 1742 if (udc_prime_status(udc, EP_DIR_IN, 0 , true)) 1743 ep0_stall(udc); 1744 break; 1745 case WAIT_FOR_OUT_STATUS: 1746 udc->ep0_state = WAIT_FOR_SETUP; 1747 break; 1748 case WAIT_FOR_SETUP: 1749 dev_err(&udc->dev->dev, "unexpect ep0 packets\n"); 1750 break; 1751 default: 1752 ep0_stall(udc); 1753 break; 1754 } 1755 } 1756 1757 static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr) 1758 { 1759 u32 temp; 1760 struct mv_dqh *dqh; 1761 1762 dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT]; 1763 1764 /* Clear bit in ENDPTSETUPSTAT */ 1765 writel((1 << ep_num), &udc->op_regs->epsetupstat); 1766 1767 /* while a hazard exists when setup package arrives */ 1768 do { 1769 /* Set Setup Tripwire */ 1770 temp = readl(&udc->op_regs->usbcmd); 1771 writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); 1772 1773 /* Copy the setup packet to local buffer */ 1774 memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8); 1775 } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET)); 1776 1777 /* Clear Setup Tripwire */ 1778 temp = readl(&udc->op_regs->usbcmd); 1779 writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); 1780 } 1781 1782 static void irq_process_tr_complete(struct mv_udc *udc) 1783 { 1784 u32 tmp, bit_pos; 1785 int i, ep_num = 0, direction = 0; 1786 struct mv_ep *curr_ep; 1787 struct mv_req *curr_req, *temp_req; 1788 int status; 1789 1790 /* 1791 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE 1792 * because the setup packets are to be read ASAP 1793 */ 1794 1795 /* Process all Setup packet received interrupts */ 1796 tmp = readl(&udc->op_regs->epsetupstat); 1797 1798 if (tmp) { 1799 for (i = 0; i < udc->max_eps; i++) { 1800 if (tmp & (1 << i)) { 1801 get_setup_data(udc, i, 1802 (u8 *)(&udc->local_setup_buff)); 1803 handle_setup_packet(udc, i, 1804 &udc->local_setup_buff); 1805 } 1806 } 1807 } 1808 1809 /* Don't clear the endpoint setup status register here. 1810 * It is cleared as a setup packet is read out of the buffer 1811 */ 1812 1813 /* Process non-setup transaction complete interrupts */ 1814 tmp = readl(&udc->op_regs->epcomplete); 1815 1816 if (!tmp) 1817 return; 1818 1819 writel(tmp, &udc->op_regs->epcomplete); 1820 1821 for (i = 0; i < udc->max_eps * 2; i++) { 1822 ep_num = i >> 1; 1823 direction = i % 2; 1824 1825 bit_pos = 1 << (ep_num + 16 * direction); 1826 1827 if (!(bit_pos & tmp)) 1828 continue; 1829 1830 if (i == 1) 1831 curr_ep = &udc->eps[0]; 1832 else 1833 curr_ep = &udc->eps[i]; 1834 /* process the req queue until an uncomplete request */ 1835 list_for_each_entry_safe(curr_req, temp_req, 1836 &curr_ep->queue, queue) { 1837 status = process_ep_req(udc, i, curr_req); 1838 if (status) 1839 break; 1840 1841 /* write back status to req */ 1842 curr_req->req.status = status; 1843 1844 /* ep0 request completion */ 1845 if (ep_num == 0) { 1846 ep0_req_complete(udc, curr_ep, curr_req); 1847 break; 1848 } else { 1849 done(curr_ep, curr_req, status); 1850 } 1851 } 1852 } 1853 } 1854 1855 static void irq_process_reset(struct mv_udc *udc) 1856 { 1857 u32 tmp; 1858 unsigned int loops; 1859 1860 udc->ep0_dir = EP_DIR_OUT; 1861 udc->ep0_state = WAIT_FOR_SETUP; 1862 udc->remote_wakeup = 0; /* default to 0 on reset */ 1863 1864 /* The address bits are past bit 25-31. Set the address */ 1865 tmp = readl(&udc->op_regs->deviceaddr); 1866 tmp &= ~(USB_DEVICE_ADDRESS_MASK); 1867 writel(tmp, &udc->op_regs->deviceaddr); 1868 1869 /* Clear all the setup token semaphores */ 1870 tmp = readl(&udc->op_regs->epsetupstat); 1871 writel(tmp, &udc->op_regs->epsetupstat); 1872 1873 /* Clear all the endpoint complete status bits */ 1874 tmp = readl(&udc->op_regs->epcomplete); 1875 writel(tmp, &udc->op_regs->epcomplete); 1876 1877 /* wait until all endptprime bits cleared */ 1878 loops = LOOPS(PRIME_TIMEOUT); 1879 while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) { 1880 if (loops == 0) { 1881 dev_err(&udc->dev->dev, 1882 "Timeout for ENDPTPRIME = 0x%x\n", 1883 readl(&udc->op_regs->epprime)); 1884 break; 1885 } 1886 loops--; 1887 udelay(LOOPS_USEC); 1888 } 1889 1890 /* Write 1s to the Flush register */ 1891 writel((u32)~0, &udc->op_regs->epflush); 1892 1893 if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) { 1894 dev_info(&udc->dev->dev, "usb bus reset\n"); 1895 udc->usb_state = USB_STATE_DEFAULT; 1896 /* reset all the queues, stop all USB activities */ 1897 gadget_reset(udc, udc->driver); 1898 } else { 1899 dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n", 1900 readl(&udc->op_regs->portsc)); 1901 1902 /* 1903 * re-initialize 1904 * controller reset 1905 */ 1906 udc_reset(udc); 1907 1908 /* reset all the queues, stop all USB activities */ 1909 stop_activity(udc, udc->driver); 1910 1911 /* reset ep0 dQH and endptctrl */ 1912 ep0_reset(udc); 1913 1914 /* enable interrupt and set controller to run state */ 1915 udc_start(udc); 1916 1917 udc->usb_state = USB_STATE_ATTACHED; 1918 } 1919 } 1920 1921 static void handle_bus_resume(struct mv_udc *udc) 1922 { 1923 udc->usb_state = udc->resume_state; 1924 udc->resume_state = 0; 1925 1926 /* report resume to the driver */ 1927 if (udc->driver) { 1928 if (udc->driver->resume) { 1929 spin_unlock(&udc->lock); 1930 udc->driver->resume(&udc->gadget); 1931 spin_lock(&udc->lock); 1932 } 1933 } 1934 } 1935 1936 static void irq_process_suspend(struct mv_udc *udc) 1937 { 1938 udc->resume_state = udc->usb_state; 1939 udc->usb_state = USB_STATE_SUSPENDED; 1940 1941 if (udc->driver->suspend) { 1942 spin_unlock(&udc->lock); 1943 udc->driver->suspend(&udc->gadget); 1944 spin_lock(&udc->lock); 1945 } 1946 } 1947 1948 static void irq_process_port_change(struct mv_udc *udc) 1949 { 1950 u32 portsc; 1951 1952 portsc = readl(&udc->op_regs->portsc[0]); 1953 if (!(portsc & PORTSCX_PORT_RESET)) { 1954 /* Get the speed */ 1955 u32 speed = portsc & PORTSCX_PORT_SPEED_MASK; 1956 switch (speed) { 1957 case PORTSCX_PORT_SPEED_HIGH: 1958 udc->gadget.speed = USB_SPEED_HIGH; 1959 break; 1960 case PORTSCX_PORT_SPEED_FULL: 1961 udc->gadget.speed = USB_SPEED_FULL; 1962 break; 1963 case PORTSCX_PORT_SPEED_LOW: 1964 udc->gadget.speed = USB_SPEED_LOW; 1965 break; 1966 default: 1967 udc->gadget.speed = USB_SPEED_UNKNOWN; 1968 break; 1969 } 1970 } 1971 1972 if (portsc & PORTSCX_PORT_SUSPEND) { 1973 udc->resume_state = udc->usb_state; 1974 udc->usb_state = USB_STATE_SUSPENDED; 1975 if (udc->driver->suspend) { 1976 spin_unlock(&udc->lock); 1977 udc->driver->suspend(&udc->gadget); 1978 spin_lock(&udc->lock); 1979 } 1980 } 1981 1982 if (!(portsc & PORTSCX_PORT_SUSPEND) 1983 && udc->usb_state == USB_STATE_SUSPENDED) { 1984 handle_bus_resume(udc); 1985 } 1986 1987 if (!udc->resume_state) 1988 udc->usb_state = USB_STATE_DEFAULT; 1989 } 1990 1991 static void irq_process_error(struct mv_udc *udc) 1992 { 1993 /* Increment the error count */ 1994 udc->errors++; 1995 } 1996 1997 static irqreturn_t mv_udc_irq(int irq, void *dev) 1998 { 1999 struct mv_udc *udc = (struct mv_udc *)dev; 2000 u32 status, intr; 2001 2002 /* Disable ISR when stopped bit is set */ 2003 if (udc->stopped) 2004 return IRQ_NONE; 2005 2006 spin_lock(&udc->lock); 2007 2008 status = readl(&udc->op_regs->usbsts); 2009 intr = readl(&udc->op_regs->usbintr); 2010 status &= intr; 2011 2012 if (status == 0) { 2013 spin_unlock(&udc->lock); 2014 return IRQ_NONE; 2015 } 2016 2017 /* Clear all the interrupts occurred */ 2018 writel(status, &udc->op_regs->usbsts); 2019 2020 if (status & USBSTS_ERR) 2021 irq_process_error(udc); 2022 2023 if (status & USBSTS_RESET) 2024 irq_process_reset(udc); 2025 2026 if (status & USBSTS_PORT_CHANGE) 2027 irq_process_port_change(udc); 2028 2029 if (status & USBSTS_INT) 2030 irq_process_tr_complete(udc); 2031 2032 if (status & USBSTS_SUSPEND) 2033 irq_process_suspend(udc); 2034 2035 spin_unlock(&udc->lock); 2036 2037 return IRQ_HANDLED; 2038 } 2039 2040 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev) 2041 { 2042 struct mv_udc *udc = (struct mv_udc *)dev; 2043 2044 /* polling VBUS and init phy may cause too much time*/ 2045 if (udc->qwork) 2046 queue_work(udc->qwork, &udc->vbus_work); 2047 2048 return IRQ_HANDLED; 2049 } 2050 2051 static void mv_udc_vbus_work(struct work_struct *work) 2052 { 2053 struct mv_udc *udc; 2054 unsigned int vbus; 2055 2056 udc = container_of(work, struct mv_udc, vbus_work); 2057 if (!udc->pdata->vbus) 2058 return; 2059 2060 vbus = udc->pdata->vbus->poll(); 2061 dev_info(&udc->dev->dev, "vbus is %d\n", vbus); 2062 2063 if (vbus == VBUS_HIGH) 2064 mv_udc_vbus_session(&udc->gadget, 1); 2065 else if (vbus == VBUS_LOW) 2066 mv_udc_vbus_session(&udc->gadget, 0); 2067 } 2068 2069 /* release device structure */ 2070 static void gadget_release(struct device *_dev) 2071 { 2072 struct mv_udc *udc; 2073 2074 udc = dev_get_drvdata(_dev); 2075 2076 complete(udc->done); 2077 } 2078 2079 static int mv_udc_remove(struct platform_device *pdev) 2080 { 2081 struct mv_udc *udc; 2082 2083 udc = platform_get_drvdata(pdev); 2084 2085 usb_del_gadget_udc(&udc->gadget); 2086 2087 if (udc->qwork) 2088 destroy_workqueue(udc->qwork); 2089 2090 /* free memory allocated in probe */ 2091 dma_pool_destroy(udc->dtd_pool); 2092 2093 if (udc->ep_dqh) 2094 dma_free_coherent(&pdev->dev, udc->ep_dqh_size, 2095 udc->ep_dqh, udc->ep_dqh_dma); 2096 2097 mv_udc_disable(udc); 2098 2099 /* free dev, wait for the release() finished */ 2100 wait_for_completion(udc->done); 2101 2102 return 0; 2103 } 2104 2105 static int mv_udc_probe(struct platform_device *pdev) 2106 { 2107 struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); 2108 struct mv_udc *udc; 2109 int retval = 0; 2110 struct resource *r; 2111 size_t size; 2112 2113 if (pdata == NULL) { 2114 dev_err(&pdev->dev, "missing platform_data\n"); 2115 return -ENODEV; 2116 } 2117 2118 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); 2119 if (udc == NULL) 2120 return -ENOMEM; 2121 2122 udc->done = &release_done; 2123 udc->pdata = dev_get_platdata(&pdev->dev); 2124 spin_lock_init(&udc->lock); 2125 2126 udc->dev = pdev; 2127 2128 if (pdata->mode == MV_USB_MODE_OTG) { 2129 udc->transceiver = devm_usb_get_phy(&pdev->dev, 2130 USB_PHY_TYPE_USB2); 2131 if (IS_ERR(udc->transceiver)) { 2132 retval = PTR_ERR(udc->transceiver); 2133 2134 if (retval == -ENXIO) 2135 return retval; 2136 2137 udc->transceiver = NULL; 2138 return -EPROBE_DEFER; 2139 } 2140 } 2141 2142 /* udc only have one sysclk. */ 2143 udc->clk = devm_clk_get(&pdev->dev, NULL); 2144 if (IS_ERR(udc->clk)) 2145 return PTR_ERR(udc->clk); 2146 2147 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs"); 2148 if (r == NULL) { 2149 dev_err(&pdev->dev, "no I/O memory resource defined\n"); 2150 return -ENODEV; 2151 } 2152 2153 udc->cap_regs = (struct mv_cap_regs __iomem *) 2154 devm_ioremap(&pdev->dev, r->start, resource_size(r)); 2155 if (udc->cap_regs == NULL) { 2156 dev_err(&pdev->dev, "failed to map I/O memory\n"); 2157 return -EBUSY; 2158 } 2159 2160 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs"); 2161 if (r == NULL) { 2162 dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); 2163 return -ENODEV; 2164 } 2165 2166 udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); 2167 if (udc->phy_regs == NULL) { 2168 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2169 return -EBUSY; 2170 } 2171 2172 /* we will acces controller register, so enable the clk */ 2173 retval = mv_udc_enable_internal(udc); 2174 if (retval) 2175 return retval; 2176 2177 udc->op_regs = 2178 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs 2179 + (readl(&udc->cap_regs->caplength_hciversion) 2180 & CAPLENGTH_MASK)); 2181 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK; 2182 2183 /* 2184 * some platform will use usb to download image, it may not disconnect 2185 * usb gadget before loading kernel. So first stop udc here. 2186 */ 2187 udc_stop(udc); 2188 writel(0xFFFFFFFF, &udc->op_regs->usbsts); 2189 2190 size = udc->max_eps * sizeof(struct mv_dqh) *2; 2191 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1); 2192 udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size, 2193 &udc->ep_dqh_dma, GFP_KERNEL); 2194 2195 if (udc->ep_dqh == NULL) { 2196 dev_err(&pdev->dev, "allocate dQH memory failed\n"); 2197 retval = -ENOMEM; 2198 goto err_disable_clock; 2199 } 2200 udc->ep_dqh_size = size; 2201 2202 /* create dTD dma_pool resource */ 2203 udc->dtd_pool = dma_pool_create("mv_dtd", 2204 &pdev->dev, 2205 sizeof(struct mv_dtd), 2206 DTD_ALIGNMENT, 2207 DMA_BOUNDARY); 2208 2209 if (!udc->dtd_pool) { 2210 retval = -ENOMEM; 2211 goto err_free_dma; 2212 } 2213 2214 size = udc->max_eps * sizeof(struct mv_ep) *2; 2215 udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); 2216 if (udc->eps == NULL) { 2217 retval = -ENOMEM; 2218 goto err_destroy_dma; 2219 } 2220 2221 /* initialize ep0 status request structure */ 2222 udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req), 2223 GFP_KERNEL); 2224 if (!udc->status_req) { 2225 retval = -ENOMEM; 2226 goto err_destroy_dma; 2227 } 2228 INIT_LIST_HEAD(&udc->status_req->queue); 2229 2230 /* allocate a small amount of memory to get valid address */ 2231 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL); 2232 udc->status_req->req.dma = DMA_ADDR_INVALID; 2233 2234 udc->resume_state = USB_STATE_NOTATTACHED; 2235 udc->usb_state = USB_STATE_POWERED; 2236 udc->ep0_dir = EP_DIR_OUT; 2237 udc->remote_wakeup = 0; 2238 2239 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0); 2240 if (r == NULL) { 2241 dev_err(&pdev->dev, "no IRQ resource defined\n"); 2242 retval = -ENODEV; 2243 goto err_destroy_dma; 2244 } 2245 udc->irq = r->start; 2246 if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq, 2247 IRQF_SHARED, driver_name, udc)) { 2248 dev_err(&pdev->dev, "Request irq %d for UDC failed\n", 2249 udc->irq); 2250 retval = -ENODEV; 2251 goto err_destroy_dma; 2252 } 2253 2254 /* initialize gadget structure */ 2255 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */ 2256 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */ 2257 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */ 2258 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */ 2259 udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */ 2260 2261 /* the "gadget" abstracts/virtualizes the controller */ 2262 udc->gadget.name = driver_name; /* gadget name */ 2263 2264 eps_init(udc); 2265 2266 /* VBUS detect: we can disable/enable clock on demand.*/ 2267 if (udc->transceiver) 2268 udc->clock_gating = 1; 2269 else if (pdata->vbus) { 2270 udc->clock_gating = 1; 2271 retval = devm_request_threaded_irq(&pdev->dev, 2272 pdata->vbus->irq, NULL, 2273 mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc); 2274 if (retval) { 2275 dev_info(&pdev->dev, 2276 "Can not request irq for VBUS, " 2277 "disable clock gating\n"); 2278 udc->clock_gating = 0; 2279 } 2280 2281 udc->qwork = create_singlethread_workqueue("mv_udc_queue"); 2282 if (!udc->qwork) { 2283 dev_err(&pdev->dev, "cannot create workqueue\n"); 2284 retval = -ENOMEM; 2285 goto err_destroy_dma; 2286 } 2287 2288 INIT_WORK(&udc->vbus_work, mv_udc_vbus_work); 2289 } 2290 2291 /* 2292 * When clock gating is supported, we can disable clk and phy. 2293 * If not, it means that VBUS detection is not supported, we 2294 * have to enable vbus active all the time to let controller work. 2295 */ 2296 if (udc->clock_gating) 2297 mv_udc_disable_internal(udc); 2298 else 2299 udc->vbus_active = 1; 2300 2301 retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, 2302 gadget_release); 2303 if (retval) 2304 goto err_create_workqueue; 2305 2306 platform_set_drvdata(pdev, udc); 2307 dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n", 2308 udc->clock_gating ? "with" : "without"); 2309 2310 return 0; 2311 2312 err_create_workqueue: 2313 if (udc->qwork) 2314 destroy_workqueue(udc->qwork); 2315 err_destroy_dma: 2316 dma_pool_destroy(udc->dtd_pool); 2317 err_free_dma: 2318 dma_free_coherent(&pdev->dev, udc->ep_dqh_size, 2319 udc->ep_dqh, udc->ep_dqh_dma); 2320 err_disable_clock: 2321 mv_udc_disable_internal(udc); 2322 2323 return retval; 2324 } 2325 2326 #ifdef CONFIG_PM 2327 static int mv_udc_suspend(struct device *dev) 2328 { 2329 struct mv_udc *udc; 2330 2331 udc = dev_get_drvdata(dev); 2332 2333 /* if OTG is enabled, the following will be done in OTG driver*/ 2334 if (udc->transceiver) 2335 return 0; 2336 2337 if (udc->pdata->vbus && udc->pdata->vbus->poll) 2338 if (udc->pdata->vbus->poll() == VBUS_HIGH) { 2339 dev_info(&udc->dev->dev, "USB cable is connected!\n"); 2340 return -EAGAIN; 2341 } 2342 2343 /* 2344 * only cable is unplugged, udc can suspend. 2345 * So do not care about clock_gating == 1. 2346 */ 2347 if (!udc->clock_gating) { 2348 udc_stop(udc); 2349 2350 spin_lock_irq(&udc->lock); 2351 /* stop all usb activities */ 2352 stop_activity(udc, udc->driver); 2353 spin_unlock_irq(&udc->lock); 2354 2355 mv_udc_disable_internal(udc); 2356 } 2357 2358 return 0; 2359 } 2360 2361 static int mv_udc_resume(struct device *dev) 2362 { 2363 struct mv_udc *udc; 2364 int retval; 2365 2366 udc = dev_get_drvdata(dev); 2367 2368 /* if OTG is enabled, the following will be done in OTG driver*/ 2369 if (udc->transceiver) 2370 return 0; 2371 2372 if (!udc->clock_gating) { 2373 retval = mv_udc_enable_internal(udc); 2374 if (retval) 2375 return retval; 2376 2377 if (udc->driver && udc->softconnect) { 2378 udc_reset(udc); 2379 ep0_reset(udc); 2380 udc_start(udc); 2381 } 2382 } 2383 2384 return 0; 2385 } 2386 2387 static const struct dev_pm_ops mv_udc_pm_ops = { 2388 .suspend = mv_udc_suspend, 2389 .resume = mv_udc_resume, 2390 }; 2391 #endif 2392 2393 static void mv_udc_shutdown(struct platform_device *pdev) 2394 { 2395 struct mv_udc *udc; 2396 u32 mode; 2397 2398 udc = platform_get_drvdata(pdev); 2399 /* reset controller mode to IDLE */ 2400 mv_udc_enable(udc); 2401 mode = readl(&udc->op_regs->usbmode); 2402 mode &= ~3; 2403 writel(mode, &udc->op_regs->usbmode); 2404 mv_udc_disable(udc); 2405 } 2406 2407 static struct platform_driver udc_driver = { 2408 .probe = mv_udc_probe, 2409 .remove = mv_udc_remove, 2410 .shutdown = mv_udc_shutdown, 2411 .driver = { 2412 .name = "mv-udc", 2413 #ifdef CONFIG_PM 2414 .pm = &mv_udc_pm_ops, 2415 #endif 2416 }, 2417 }; 2418 2419 module_platform_driver(udc_driver); 2420 MODULE_ALIAS("platform:mv-udc"); 2421 MODULE_DESCRIPTION(DRIVER_DESC); 2422 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); 2423 MODULE_LICENSE("GPL"); 2424