// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/unaligned.h>

#include "mv_udc.h"

#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
			((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000

#define LOOPS_USEC_SHIFT	1
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)

static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};

static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}

/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32 epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}
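
/*
 * Walk the dTD chain of @curr_req and collect its completion status:
 * returns 1 while any dTD is still active, a negative errno when the
 * hardware flagged an error, and 0 with req.actual updated once the
 * whole request has completed.
 */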
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd *curr_dtd;
	struct mv_dqh *curr_dqh;
	int actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD has remaining data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}

/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * the request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Remove the req from ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}

static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it has
			 * been cleared. The hardware clears the bit when it
			 * sees a hazard; otherwise the bit stays set and we
			 * can proceed with priming the endpoint if it is not
			 * already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
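
/*
 * Each dTD carries five buffer page pointers: buff_ptr0 holds the full
 * DMA address (including the byte offset into its 4 KiB page) and
 * buff_ptr1-4 address the following 4 KiB pages, so one dTD can cover
 * at most EP_MAX_LENGTH_TRANSFER bytes of a contiguous buffer.
 */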
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no __GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	mb();

	return dtd;
}

/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	dma_addr_t dma;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
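
/*
 * req_to_dtd() above chains one dTD per EP_MAX_LENGTH_TRANSFER chunk.
 * For example, assuming the usual 16 KiB (0x4000) limit from mv_udc.h,
 * a 40 KiB bulk request is built as three linked dTDs of 16K + 16K +
 * 8K, with DTD_IOC set only on the last one.
 */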
static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	const unsigned char zlt = 1;
	unsigned char ios, mult;
	unsigned long flags;

	/* validate the arguments before dereferencing them */
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}

	/* Set the max packet length, interrupt on Setup and Mult fields */
	ios = 0;
	mult = 0;
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = usb_endpoint_maxp_mult(desc);
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}

static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req;

	if (!_req)
		return;

	req = container_of(_req, struct mv_req, req);
	kfree(req);
}

static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
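
/*
 * mv_ep_queue() is reached through the generic usb_ep API; a minimal
 * caller-side sketch (buf, len and my_complete are example names only):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// called back from done()
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */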
/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc;
	unsigned long flags;
	int retval;

	/* dereference ep only after validating _ep */
	if (unlikely(!_ep || !ep->ep.desc))
		return -EINVAL;

	udc = ep->udc;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
	if (retval)
		return retval;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
			goto err_unmap_dma;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
		retval = -ENOMEM;
		goto err_unmap_dma;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;

err_unmap_dma:
	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));

	return retval;
}

static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
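
/*
 * Dequeue handles three cases: the victim request is currently being
 * executed (the dQH is repointed at the next request, or cleared when
 * none follows), it sits further back in the queue (the previous dTD
 * chain is patched around it), or it is not queued here at all
 * (-EINVAL).
 */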
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
					struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);
	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
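
/*
 * ep_set_stall() toggles the per-endpoint STALL bits and re-arms the
 * data toggle when unstalling. A wedged endpoint stays halted even
 * across ClearFeature(ENDPOINT_HALT), since ch9clearfeature() skips
 * the unstall while ep->wedge is set.
 */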
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}

static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	if (!_ep) {
		status = -EINVAL;
		goto out;
	}

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;
	if (!ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * An attempt to halt an IN ep will fail if any transfer
	 * requests are still queued.
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static const struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};

static int udc_clock_enable(struct mv_udc *udc)
{
	return clk_prepare_enable(udc->clk);
}

static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}

static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Reset the Run bit in the command register to stop VUSB */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}

static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
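
/*
 * Full controller reset: stop the core, issue USBCMD_CTRL_RESET and
 * poll for the self-clearing bit, then re-select device mode, turn
 * setup lockout off and point ENDPOINTLISTADDR at the dQH array.
 */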
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}

static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	retval = udc_clock_enable(udc);
	if (retval)
		return retval;

	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}

static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16 retval;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc);
	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;
	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);
	return 0;
}

static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		if (!udc->active)
			goto out;

		/* stop all transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};
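
/*
 * udc->eps[] mirrors the hardware dQH array: entry i belongs to
 * endpoint i / 2, even indices are OUT and odd indices are IN, so
 * eps[ep_num * 2 + direction] addresses one endpoint/direction pair.
 */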
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep *ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
	ep->ep.caps.type_control = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
			ep->ep.caps.dir_in = true;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
			ep->ep.caps.dir_out = true;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}

/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
	/* called with spinlock held */
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);
	}
}

static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep *ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report reset; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, driver);
		spin_lock(&udc->lock);
	}
}

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep *ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
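
/*
 * udc_start()/udc_stop() are the gadget ops the UDC core invokes when
 * a function driver binds to or unbinds from this controller.
 */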
static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			udc->driver = NULL;
			return retval;
		}
	}

	/* When booting with the cable attached, no vbus irq will occur */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}

static int mv_udc_stop(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, NULL);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}

static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	portsc |= mode << 16;
	writel(portsc, &udc->op_regs->portsc[0]);
}

static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc;
	unsigned long flags;

	udc = mvep->udc;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}
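
/*
 * Queue the shared status_req on ep0 to run a status (or a two-byte
 * GET_STATUS data) stage: a zero-length transfer when @empty is true,
 * otherwise the little-endian @status word.
 */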
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else {	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
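
/*
 * USB 2.0 test modes: the selector arrives in the high byte of wIndex
 * and is written into the PTC field (PORTSCx bits 19:16) by
 * mv_set_ptc() only after the status stage completes, via
 * prime_status_complete().
 */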
static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index <= TEST_FORCE_EN) {
		udc->test_mode = index;
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
	} else
		dev_err(&udc->dev->dev,
			"This test mode (%d) is not supported\n", index);
}

static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}

static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 ep_num, direction;

		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, ep_num, direction)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}

static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
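
/*
 * ep0 state machine: WAIT_FOR_SETUP -> DATA_STATE_XMIT or
 * DATA_STATE_RECV (when the request carries a data stage) ->
 * WAIT_FOR_OUT_STATUS -> back to WAIT_FOR_SETUP; ep0_req_complete()
 * drives the transitions as each ep0 request retires.
 */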
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate anything we did not handle above to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
					&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
					&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}

/* complete the DATA or STATUS phase of ep0, and prime status when needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packets\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}

static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists while the setup packet is still arriving */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}

static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/*
	 * Don't clear the endpoint setup status register here;
	 * it is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}

static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* Clear the device address (bits 25-31 of DEVICEADDR) */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		gadget_reset(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/* the reset already completed, so re-initialize the controller */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}

static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver) {
		if (udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}
}

static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}

static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}

static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts that occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and initializing the PHY may take too much time */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}

static void mv_udc_vbus_work(struct work_struct *work)
{
	struct mv_udc *udc;
	unsigned int vbus;

	udc = container_of(work, struct mv_udc, vbus_work);
	if (!udc->pdata->vbus)
		return;

	vbus = udc->pdata->vbus->poll();
	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);

	if (vbus == VBUS_HIGH)
		mv_udc_vbus_session(&udc->gadget, 1);
	else if (vbus == VBUS_LOW)
		mv_udc_vbus_session(&udc->gadget, 0);
}

/* release device structure */
static void gadget_release(struct device *_dev)
{
	struct mv_udc *udc;

	udc = dev_get_drvdata(_dev);

	complete(udc->done);
}

static int mv_udc_remove(struct platform_device *pdev)
{
	struct mv_udc *udc;

	udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);

	if (udc->qwork) {
		flush_workqueue(udc->qwork);
		destroy_workqueue(udc->qwork);
	}

	/* free memory allocated in probe */
	dma_pool_destroy(udc->dtd_pool);

	if (udc->ep_dqh)
		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);

	mv_udc_disable(udc);

	/* free dev, wait for the release() finished */
	wait_for_completion(udc->done);

	return 0;
}
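
/*
 * Probe locates the operational registers CAPLENGTH bytes past the
 * capability register base and takes the endpoint count from the DEN
 * field of DCCPARAMS; both values size the dQH and ep arrays
 * allocated below.
 */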
destroy_workqueue(udc->qwork); 2091 } 2092 2093 /* free memory allocated in probe */ 2094 dma_pool_destroy(udc->dtd_pool); 2095 2096 if (udc->ep_dqh) 2097 dma_free_coherent(&pdev->dev, udc->ep_dqh_size, 2098 udc->ep_dqh, udc->ep_dqh_dma); 2099 2100 mv_udc_disable(udc); 2101 2102 /* free dev, wait for the release() finished */ 2103 wait_for_completion(udc->done); 2104 2105 return 0; 2106 } 2107 2108 static int mv_udc_probe(struct platform_device *pdev) 2109 { 2110 struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); 2111 struct mv_udc *udc; 2112 int retval = 0; 2113 struct resource *r; 2114 size_t size; 2115 2116 if (pdata == NULL) { 2117 dev_err(&pdev->dev, "missing platform_data\n"); 2118 return -ENODEV; 2119 } 2120 2121 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); 2122 if (udc == NULL) 2123 return -ENOMEM; 2124 2125 udc->done = &release_done; 2126 udc->pdata = dev_get_platdata(&pdev->dev); 2127 spin_lock_init(&udc->lock); 2128 2129 udc->dev = pdev; 2130 2131 if (pdata->mode == MV_USB_MODE_OTG) { 2132 udc->transceiver = devm_usb_get_phy(&pdev->dev, 2133 USB_PHY_TYPE_USB2); 2134 if (IS_ERR(udc->transceiver)) { 2135 retval = PTR_ERR(udc->transceiver); 2136 2137 if (retval == -ENXIO) 2138 return retval; 2139 2140 udc->transceiver = NULL; 2141 return -EPROBE_DEFER; 2142 } 2143 } 2144 2145 /* udc only have one sysclk. */ 2146 udc->clk = devm_clk_get(&pdev->dev, NULL); 2147 if (IS_ERR(udc->clk)) 2148 return PTR_ERR(udc->clk); 2149 2150 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs"); 2151 if (r == NULL) { 2152 dev_err(&pdev->dev, "no I/O memory resource defined\n"); 2153 return -ENODEV; 2154 } 2155 2156 udc->cap_regs = (struct mv_cap_regs __iomem *) 2157 devm_ioremap(&pdev->dev, r->start, resource_size(r)); 2158 if (udc->cap_regs == NULL) { 2159 dev_err(&pdev->dev, "failed to map I/O memory\n"); 2160 return -EBUSY; 2161 } 2162 2163 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs"); 2164 if (r == NULL) { 2165 dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); 2166 return -ENODEV; 2167 } 2168 2169 udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); 2170 if (udc->phy_regs == NULL) { 2171 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2172 return -EBUSY; 2173 } 2174 2175 /* we will acces controller register, so enable the clk */ 2176 retval = mv_udc_enable_internal(udc); 2177 if (retval) 2178 return retval; 2179 2180 udc->op_regs = 2181 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs 2182 + (readl(&udc->cap_regs->caplength_hciversion) 2183 & CAPLENGTH_MASK)); 2184 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK; 2185 2186 /* 2187 * some platform will use usb to download image, it may not disconnect 2188 * usb gadget before loading kernel. So first stop udc here. 
	/*
	 * Some platforms use USB to download the boot image and may not
	 * disconnect the USB gadget before loading the kernel, so stop
	 * the udc here first.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
					&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&pdev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&pdev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (udc->eps == NULL) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
					GFP_KERNEL);
	if (!udc->status_req) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_destroy_dma;
	}
	udc->irq = r->start;
	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_destroy_dma;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;		/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	udc->gadget.name = driver_name;		/* gadget name */

	eps_init(udc);

	/* VBUS detect: we can disable/enable the clock on demand. */
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = devm_request_threaded_irq(&pdev->dev,
				pdata->vbus->irq, NULL,
				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&pdev->dev,
				"cannot request irq for VBUS, disable clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&pdev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_destroy_dma;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}
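	/*
	 * VBUS handling note (descriptive comment): the VBUS interrupt is
	 * requested as a threaded IRQ whose handler merely queues
	 * mv_udc_vbus_work(); the work item then polls the cable state via
	 * pdata->vbus->poll() and calls mv_udc_vbus_session() to attach or
	 * detach. Work that may sleep (PHY setup, clock enable) is thereby
	 * kept out of atomic context, which is what makes on-demand clock
	 * gating safe here.
	 */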
	/*
	 * When clock gating is supported, we can disable the clk and phy.
	 * If not, VBUS detection is not supported and we have to keep
	 * vbus active all the time to let the controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
			gadget_release);
	if (retval)
		goto err_create_workqueue;

	platform_set_drvdata(pdev, udc);
	dev_info(&pdev->dev, "successfully probed UDC device %s clock gating\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_create_workqueue:
	destroy_workqueue(udc->qwork);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);

	return retval;
}

#ifdef CONFIG_PM
static int mv_udc_suspend(struct device *dev)
{
	struct mv_udc *udc;

	udc = dev_get_drvdata(dev);

	/* if OTG is enabled, the following will be done in the OTG driver */
	if (udc->transceiver)
		return 0;

	if (udc->pdata->vbus && udc->pdata->vbus->poll)
		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
			dev_info(&udc->dev->dev, "USB cable is connected!\n");
			return -EAGAIN;
		}

	/*
	 * The udc may suspend only when the cable is unplugged, so the
	 * clock_gating == 1 case needs no handling here.
	 */
	if (!udc->clock_gating) {
		udc_stop(udc);

		spin_lock_irq(&udc->lock);
		/* stop all usb activities */
		stop_activity(udc, udc->driver);
		spin_unlock_irq(&udc->lock);

		mv_udc_disable_internal(udc);
	}

	return 0;
}

static int mv_udc_resume(struct device *dev)
{
	struct mv_udc *udc;
	int retval;

	udc = dev_get_drvdata(dev);

	/* if OTG is enabled, the following will be done in the OTG driver */
	if (udc->transceiver)
		return 0;

	if (!udc->clock_gating) {
		retval = mv_udc_enable_internal(udc);
		if (retval)
			return retval;

		if (udc->driver && udc->softconnect) {
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	}

	return 0;
}

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif

static void mv_udc_shutdown(struct platform_device *pdev)
{
	struct mv_udc *udc;
	u32 mode;

	udc = platform_get_drvdata(pdev);
	/* reset controller mode to IDLE */
	mv_udc_enable(udc);
	mode = readl(&udc->op_regs->usbmode);
	mode &= ~3;
	writel(mode, &udc->op_regs->usbmode);
	mv_udc_disable(udc);
}

static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	.remove		= mv_udc_remove,
	.shutdown	= mv_udc_shutdown,
	.driver		= {
		.name	= "mv-udc",
#ifdef CONFIG_PM
		.pm	= &mv_udc_pm_ops,
#endif
	},
};

module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_LICENSE("GPL");
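/*
 * Usage sketch (illustration only, not part of this driver): a board file
 * binds to this driver by registering a platform device named "mv-udc"
 * that provides the "capregs" and "phyregs" memory resources, the
 * controller IRQ, and a struct mv_usb_platform_data. The resource and
 * field names are the ones probe() actually looks up; the addresses and
 * IRQ number below are hypothetical.
 *
 *	static struct resource board_udc_resources[] = {
 *		DEFINE_RES_MEM_NAMED(0xd4208100, 0x200, "capregs"),
 *		DEFINE_RES_MEM_NAMED(0xd4207000, 0x100, "phyregs"),
 *		DEFINE_RES_IRQ(44),
 *	};
 *
 *	static struct mv_usb_platform_data board_udc_pdata = {
 *		.mode = MV_USB_MODE_OTG,
 *	};
 */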