1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2011 Marvell International Ltd. All rights reserved. 4 */ 5 6 #include <linux/module.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/dmapool.h> 9 #include <linux/kernel.h> 10 #include <linux/delay.h> 11 #include <linux/ioport.h> 12 #include <linux/sched.h> 13 #include <linux/slab.h> 14 #include <linux/errno.h> 15 #include <linux/timer.h> 16 #include <linux/list.h> 17 #include <linux/notifier.h> 18 #include <linux/interrupt.h> 19 #include <linux/moduleparam.h> 20 #include <linux/device.h> 21 #include <linux/usb/ch9.h> 22 #include <linux/usb/gadget.h> 23 #include <linux/pm.h> 24 #include <linux/io.h> 25 #include <linux/irq.h> 26 #include <linux/platform_device.h> 27 #include <linux/platform_data/mv_usb.h> 28 #include <linux/clk.h> 29 30 #include "mv_u3d.h" 31 32 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver" 33 34 static const char driver_name[] = "mv_u3d"; 35 36 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status); 37 static void mv_u3d_stop_activity(struct mv_u3d *u3d, 38 struct usb_gadget_driver *driver); 39 40 /* for endpoint 0 operations */ 41 static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = { 42 .bLength = USB_DT_ENDPOINT_SIZE, 43 .bDescriptorType = USB_DT_ENDPOINT, 44 .bEndpointAddress = 0, 45 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 46 .wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE, 47 }; 48 49 static void mv_u3d_ep0_reset(struct mv_u3d *u3d) 50 { 51 struct mv_u3d_ep *ep; 52 u32 epxcr; 53 int i; 54 55 for (i = 0; i < 2; i++) { 56 ep = &u3d->eps[i]; 57 ep->u3d = u3d; 58 59 /* ep0 ep context, ep0 in and out share the same ep context */ 60 ep->ep_context = &u3d->ep_context[1]; 61 } 62 63 /* reset ep state machine */ 64 /* reset ep0 out */ 65 epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0); 66 epxcr |= MV_U3D_EPXCR_EP_INIT; 67 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0); 68 udelay(5); 69 epxcr &= ~MV_U3D_EPXCR_EP_INIT; 70 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0); 71 72 epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE 73 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) 74 | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) 75 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) 76 | MV_U3D_EPXCR_EP_TYPE_CONTROL); 77 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1); 78 79 /* reset ep0 in */ 80 epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0); 81 epxcr |= MV_U3D_EPXCR_EP_INIT; 82 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0); 83 udelay(5); 84 epxcr &= ~MV_U3D_EPXCR_EP_INIT; 85 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0); 86 87 epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE 88 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) 89 | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) 90 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) 91 | MV_U3D_EPXCR_EP_TYPE_CONTROL); 92 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1); 93 } 94 95 static void mv_u3d_ep0_stall(struct mv_u3d *u3d) 96 { 97 u32 tmp; 98 dev_dbg(u3d->dev, "%s\n", __func__); 99 100 /* set TX and RX to stall */ 101 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0); 102 tmp |= MV_U3D_EPXCR_EP_HALT; 103 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0); 104 105 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0); 106 tmp |= MV_U3D_EPXCR_EP_HALT; 107 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0); 108 109 /* update ep0 state */ 110 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP; 111 u3d->ep0_dir = MV_U3D_EP_DIR_OUT; 112 } 113 114 static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index, 115 struct mv_u3d_req *curr_req) 116 { 117 struct mv_u3d_trb *curr_trb; 118 int actual, remaining_length = 0; 119 int direction, ep_num; 120 int retval = 0; 121 u32 tmp, status, length; 122 123 direction = index % 2; 124 ep_num = index / 2; 125 126 actual = curr_req->req.length; 127 128 while (!list_empty(&curr_req->trb_list)) { 129 curr_trb = list_entry(curr_req->trb_list.next, 130 struct mv_u3d_trb, trb_list); 131 if (!curr_trb->trb_hw->ctrl.own) { 132 dev_err(u3d->dev, "%s, TRB own error!\n", 133 u3d->eps[index].name); 134 return 1; 135 } 136 137 curr_trb->trb_hw->ctrl.own = 0; 138 if (direction == MV_U3D_EP_DIR_OUT) 139 tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo); 140 else 141 tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo); 142 143 status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT; 144 length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK; 145 146 if (status == MV_U3D_COMPLETE_SUCCESS || 147 (status == MV_U3D_COMPLETE_SHORT_PACKET && 148 direction == MV_U3D_EP_DIR_OUT)) { 149 remaining_length += length; 150 actual -= remaining_length; 151 } else { 152 dev_err(u3d->dev, 153 "complete_tr error: ep=%d %s: error = 0x%x\n", 154 index >> 1, direction ? "SEND" : "RECV", 155 status); 156 retval = -EPROTO; 157 } 158 159 list_del_init(&curr_trb->trb_list); 160 } 161 if (retval) 162 return retval; 163 164 curr_req->req.actual = actual; 165 return 0; 166 } 167 168 /* 169 * mv_u3d_done() - retire a request; caller blocked irqs 170 * @status : request status to be set, only works when 171 * request is still in progress. 172 */ 173 static 174 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status) 175 __releases(&ep->udc->lock) 176 __acquires(&ep->udc->lock) 177 { 178 struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d; 179 180 dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n"); 181 /* Removed the req from ep queue */ 182 list_del_init(&req->queue); 183 184 /* req.status should be set as -EINPROGRESS in ep_queue() */ 185 if (req->req.status == -EINPROGRESS) 186 req->req.status = status; 187 else 188 status = req->req.status; 189 190 /* Free trb for the request */ 191 if (!req->chain) 192 dma_pool_free(u3d->trb_pool, 193 req->trb_head->trb_hw, req->trb_head->trb_dma); 194 else { 195 dma_unmap_single(ep->u3d->gadget.dev.parent, 196 (dma_addr_t)req->trb_head->trb_dma, 197 req->trb_count * sizeof(struct mv_u3d_trb_hw), 198 DMA_BIDIRECTIONAL); 199 kfree(req->trb_head->trb_hw); 200 } 201 kfree(req->trb_head); 202 203 usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep)); 204 205 if (status && (status != -ESHUTDOWN)) { 206 dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u", 207 ep->ep.name, &req->req, status, 208 req->req.actual, req->req.length); 209 } 210 211 spin_unlock(&ep->u3d->lock); 212 213 usb_gadget_giveback_request(&ep->ep, &req->req); 214 215 spin_lock(&ep->u3d->lock); 216 } 217 218 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req) 219 { 220 u32 tmp, direction; 221 struct mv_u3d *u3d; 222 struct mv_u3d_ep_context *ep_context; 223 int retval = 0; 224 225 u3d = ep->u3d; 226 direction = mv_u3d_ep_dir(ep); 227 228 /* ep0 in and out share the same ep context slot 1*/ 229 if (ep->ep_num == 0) 230 ep_context = &(u3d->ep_context[1]); 231 else 232 ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]); 233 234 /* check if the pipe is empty or not */ 235 if (!list_empty(&ep->queue)) { 236 dev_err(u3d->dev, "add trb to non-empty queue!\n"); 237 retval = -ENOMEM; 238 WARN_ON(1); 239 } else { 240 ep_context->rsvd0 = cpu_to_le32(1); 241 ep_context->rsvd1 = 0; 242 243 /* Configure the trb address and set the DCS bit. 244 * Both DCS bit and own bit in trb should be set. 245 */ 246 ep_context->trb_addr_lo = 247 cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE); 248 ep_context->trb_addr_hi = 0; 249 250 /* Ensure that updates to the EP Context will 251 * occure before Ring Bell. 252 */ 253 wmb(); 254 255 /* ring bell the ep */ 256 if (ep->ep_num == 0) 257 tmp = 0x1; 258 else 259 tmp = ep->ep_num * 2 260 + ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1); 261 262 iowrite32(tmp, &u3d->op_regs->doorbell); 263 } 264 return retval; 265 } 266 267 static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req, 268 unsigned *length, dma_addr_t *dma) 269 { 270 u32 temp; 271 unsigned int direction; 272 struct mv_u3d_trb *trb; 273 struct mv_u3d_trb_hw *trb_hw; 274 struct mv_u3d *u3d; 275 276 /* how big will this transfer be? */ 277 *length = req->req.length - req->req.actual; 278 BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER); 279 280 u3d = req->ep->u3d; 281 282 trb = kzalloc(sizeof(*trb), GFP_ATOMIC); 283 if (!trb) 284 return NULL; 285 286 /* 287 * Be careful that no _GFP_HIGHMEM is set, 288 * or we can not use dma_to_virt 289 * cannot use GFP_KERNEL in spin lock 290 */ 291 trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma); 292 if (!trb_hw) { 293 kfree(trb); 294 dev_err(u3d->dev, 295 "%s, dma_pool_alloc fail\n", __func__); 296 return NULL; 297 } 298 trb->trb_dma = *dma; 299 trb->trb_hw = trb_hw; 300 301 /* initialize buffer page pointers */ 302 temp = (u32)(req->req.dma + req->req.actual); 303 304 trb_hw->buf_addr_lo = cpu_to_le32(temp); 305 trb_hw->buf_addr_hi = 0; 306 trb_hw->trb_len = cpu_to_le32(*length); 307 trb_hw->ctrl.own = 1; 308 309 if (req->ep->ep_num == 0) 310 trb_hw->ctrl.type = TYPE_DATA; 311 else 312 trb_hw->ctrl.type = TYPE_NORMAL; 313 314 req->req.actual += *length; 315 316 direction = mv_u3d_ep_dir(req->ep); 317 if (direction == MV_U3D_EP_DIR_IN) 318 trb_hw->ctrl.dir = 1; 319 else 320 trb_hw->ctrl.dir = 0; 321 322 /* Enable interrupt for the last trb of a request */ 323 if (!req->req.no_interrupt) 324 trb_hw->ctrl.ioc = 1; 325 326 trb_hw->ctrl.chain = 0; 327 328 wmb(); 329 return trb; 330 } 331 332 static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length, 333 struct mv_u3d_trb *trb, int *is_last) 334 { 335 u32 temp; 336 unsigned int direction; 337 struct mv_u3d *u3d; 338 339 /* how big will this transfer be? */ 340 *length = min(req->req.length - req->req.actual, 341 (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER); 342 343 u3d = req->ep->u3d; 344 345 trb->trb_dma = 0; 346 347 /* initialize buffer page pointers */ 348 temp = (u32)(req->req.dma + req->req.actual); 349 350 trb->trb_hw->buf_addr_lo = cpu_to_le32(temp); 351 trb->trb_hw->buf_addr_hi = 0; 352 trb->trb_hw->trb_len = cpu_to_le32(*length); 353 trb->trb_hw->ctrl.own = 1; 354 355 if (req->ep->ep_num == 0) 356 trb->trb_hw->ctrl.type = TYPE_DATA; 357 else 358 trb->trb_hw->ctrl.type = TYPE_NORMAL; 359 360 req->req.actual += *length; 361 362 direction = mv_u3d_ep_dir(req->ep); 363 if (direction == MV_U3D_EP_DIR_IN) 364 trb->trb_hw->ctrl.dir = 1; 365 else 366 trb->trb_hw->ctrl.dir = 0; 367 368 /* zlp is needed if req->req.zero is set */ 369 if (req->req.zero) { 370 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) 371 *is_last = 1; 372 else 373 *is_last = 0; 374 } else if (req->req.length == req->req.actual) 375 *is_last = 1; 376 else 377 *is_last = 0; 378 379 /* Enable interrupt for the last trb of a request */ 380 if (*is_last && !req->req.no_interrupt) 381 trb->trb_hw->ctrl.ioc = 1; 382 383 if (*is_last) 384 trb->trb_hw->ctrl.chain = 0; 385 else { 386 trb->trb_hw->ctrl.chain = 1; 387 dev_dbg(u3d->dev, "chain trb\n"); 388 } 389 390 wmb(); 391 392 return 0; 393 } 394 395 /* generate TRB linked list for a request 396 * usb controller only supports continous trb chain, 397 * that trb structure physical address should be continous. 398 */ 399 static int mv_u3d_req_to_trb(struct mv_u3d_req *req) 400 { 401 unsigned count; 402 int is_last; 403 struct mv_u3d_trb *trb; 404 struct mv_u3d_trb_hw *trb_hw; 405 struct mv_u3d *u3d; 406 dma_addr_t dma; 407 unsigned length; 408 unsigned trb_num; 409 410 u3d = req->ep->u3d; 411 412 INIT_LIST_HEAD(&req->trb_list); 413 414 length = req->req.length - req->req.actual; 415 /* normally the request transfer length is less than 16KB. 416 * we use buil_trb_one() to optimize it. 417 */ 418 if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) { 419 trb = mv_u3d_build_trb_one(req, &count, &dma); 420 list_add_tail(&trb->trb_list, &req->trb_list); 421 req->trb_head = trb; 422 req->trb_count = 1; 423 req->chain = 0; 424 } else { 425 trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER; 426 if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER) 427 trb_num++; 428 429 trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC); 430 if (!trb) 431 return -ENOMEM; 432 433 trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC); 434 if (!trb_hw) { 435 kfree(trb); 436 return -ENOMEM; 437 } 438 439 do { 440 trb->trb_hw = trb_hw; 441 if (mv_u3d_build_trb_chain(req, &count, 442 trb, &is_last)) { 443 dev_err(u3d->dev, 444 "%s, mv_u3d_build_trb_chain fail\n", 445 __func__); 446 return -EIO; 447 } 448 449 list_add_tail(&trb->trb_list, &req->trb_list); 450 req->trb_count++; 451 trb++; 452 trb_hw++; 453 } while (!is_last); 454 455 req->trb_head = list_entry(req->trb_list.next, 456 struct mv_u3d_trb, trb_list); 457 req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent, 458 req->trb_head->trb_hw, 459 trb_num * sizeof(*trb_hw), 460 DMA_BIDIRECTIONAL); 461 if (dma_mapping_error(u3d->gadget.dev.parent, 462 req->trb_head->trb_dma)) { 463 kfree(req->trb_head->trb_hw); 464 kfree(req->trb_head); 465 return -EFAULT; 466 } 467 468 req->chain = 1; 469 } 470 471 return 0; 472 } 473 474 static int 475 mv_u3d_start_queue(struct mv_u3d_ep *ep) 476 { 477 struct mv_u3d *u3d = ep->u3d; 478 struct mv_u3d_req *req; 479 int ret; 480 481 if (!list_empty(&ep->req_list) && !ep->processing) 482 req = list_entry(ep->req_list.next, struct mv_u3d_req, list); 483 else 484 return 0; 485 486 ep->processing = 1; 487 488 /* set up dma mapping */ 489 ret = usb_gadget_map_request(&u3d->gadget, &req->req, 490 mv_u3d_ep_dir(ep)); 491 if (ret) 492 goto break_processing; 493 494 req->req.status = -EINPROGRESS; 495 req->req.actual = 0; 496 req->trb_count = 0; 497 498 /* build trbs */ 499 ret = mv_u3d_req_to_trb(req); 500 if (ret) { 501 dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__); 502 goto break_processing; 503 } 504 505 /* and push them to device queue */ 506 ret = mv_u3d_queue_trb(ep, req); 507 if (ret) 508 goto break_processing; 509 510 /* irq handler advances the queue */ 511 list_add_tail(&req->queue, &ep->queue); 512 513 return 0; 514 515 break_processing: 516 ep->processing = 0; 517 return ret; 518 } 519 520 static int mv_u3d_ep_enable(struct usb_ep *_ep, 521 const struct usb_endpoint_descriptor *desc) 522 { 523 struct mv_u3d *u3d; 524 struct mv_u3d_ep *ep; 525 u16 max = 0; 526 unsigned maxburst = 0; 527 u32 epxcr, direction; 528 529 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) 530 return -EINVAL; 531 532 ep = container_of(_ep, struct mv_u3d_ep, ep); 533 u3d = ep->u3d; 534 535 if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) 536 return -ESHUTDOWN; 537 538 direction = mv_u3d_ep_dir(ep); 539 max = le16_to_cpu(desc->wMaxPacketSize); 540 541 if (!_ep->maxburst) 542 _ep->maxburst = 1; 543 maxburst = _ep->maxburst; 544 545 /* Set the max burst size */ 546 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 547 case USB_ENDPOINT_XFER_BULK: 548 if (maxburst > 16) { 549 dev_dbg(u3d->dev, 550 "max burst should not be greater " 551 "than 16 on bulk ep\n"); 552 maxburst = 1; 553 _ep->maxburst = maxburst; 554 } 555 dev_dbg(u3d->dev, 556 "maxburst: %d on bulk %s\n", maxburst, ep->name); 557 break; 558 case USB_ENDPOINT_XFER_CONTROL: 559 /* control transfer only supports maxburst as one */ 560 maxburst = 1; 561 _ep->maxburst = maxburst; 562 break; 563 case USB_ENDPOINT_XFER_INT: 564 if (maxburst != 1) { 565 dev_dbg(u3d->dev, 566 "max burst should be 1 on int ep " 567 "if transfer size is not 1024\n"); 568 maxburst = 1; 569 _ep->maxburst = maxburst; 570 } 571 break; 572 case USB_ENDPOINT_XFER_ISOC: 573 if (maxburst != 1) { 574 dev_dbg(u3d->dev, 575 "max burst should be 1 on isoc ep " 576 "if transfer size is not 1024\n"); 577 maxburst = 1; 578 _ep->maxburst = maxburst; 579 } 580 break; 581 default: 582 goto en_done; 583 } 584 585 ep->ep.maxpacket = max; 586 ep->ep.desc = desc; 587 ep->enabled = 1; 588 589 /* Enable the endpoint for Rx or Tx and set the endpoint type */ 590 if (direction == MV_U3D_EP_DIR_OUT) { 591 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 592 epxcr |= MV_U3D_EPXCR_EP_INIT; 593 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 594 udelay(5); 595 epxcr &= ~MV_U3D_EPXCR_EP_INIT; 596 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 597 598 epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) 599 | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) 600 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) 601 | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)); 602 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); 603 } else { 604 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 605 epxcr |= MV_U3D_EPXCR_EP_INIT; 606 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 607 udelay(5); 608 epxcr &= ~MV_U3D_EPXCR_EP_INIT; 609 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 610 611 epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) 612 | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) 613 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) 614 | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)); 615 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1); 616 } 617 618 return 0; 619 en_done: 620 return -EINVAL; 621 } 622 623 static int mv_u3d_ep_disable(struct usb_ep *_ep) 624 { 625 struct mv_u3d *u3d; 626 struct mv_u3d_ep *ep; 627 u32 epxcr, direction; 628 unsigned long flags; 629 630 if (!_ep) 631 return -EINVAL; 632 633 ep = container_of(_ep, struct mv_u3d_ep, ep); 634 if (!ep->ep.desc) 635 return -EINVAL; 636 637 u3d = ep->u3d; 638 639 direction = mv_u3d_ep_dir(ep); 640 641 /* nuke all pending requests (does flush) */ 642 spin_lock_irqsave(&u3d->lock, flags); 643 mv_u3d_nuke(ep, -ESHUTDOWN); 644 spin_unlock_irqrestore(&u3d->lock, flags); 645 646 /* Disable the endpoint for Rx or Tx and reset the endpoint type */ 647 if (direction == MV_U3D_EP_DIR_OUT) { 648 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); 649 epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) 650 | USB_ENDPOINT_XFERTYPE_MASK); 651 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); 652 } else { 653 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1); 654 epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) 655 | USB_ENDPOINT_XFERTYPE_MASK); 656 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1); 657 } 658 659 ep->enabled = 0; 660 661 ep->ep.desc = NULL; 662 return 0; 663 } 664 665 static struct usb_request * 666 mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 667 { 668 struct mv_u3d_req *req = NULL; 669 670 req = kzalloc(sizeof *req, gfp_flags); 671 if (!req) 672 return NULL; 673 674 INIT_LIST_HEAD(&req->queue); 675 676 return &req->req; 677 } 678 679 static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req) 680 { 681 struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req); 682 683 kfree(req); 684 } 685 686 static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep) 687 { 688 struct mv_u3d *u3d; 689 u32 direction; 690 struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep); 691 unsigned int loops; 692 u32 tmp; 693 694 /* if endpoint is not enabled, cannot flush endpoint */ 695 if (!ep->enabled) 696 return; 697 698 u3d = ep->u3d; 699 direction = mv_u3d_ep_dir(ep); 700 701 /* ep0 need clear bit after flushing fifo. */ 702 if (!ep->ep_num) { 703 if (direction == MV_U3D_EP_DIR_OUT) { 704 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0); 705 tmp |= MV_U3D_EPXCR_EP_FLUSH; 706 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0); 707 udelay(10); 708 tmp &= ~MV_U3D_EPXCR_EP_FLUSH; 709 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0); 710 } else { 711 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0); 712 tmp |= MV_U3D_EPXCR_EP_FLUSH; 713 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0); 714 udelay(10); 715 tmp &= ~MV_U3D_EPXCR_EP_FLUSH; 716 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0); 717 } 718 return; 719 } 720 721 if (direction == MV_U3D_EP_DIR_OUT) { 722 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 723 tmp |= MV_U3D_EPXCR_EP_FLUSH; 724 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 725 726 /* Wait until flushing completed */ 727 loops = LOOPS(MV_U3D_FLUSH_TIMEOUT); 728 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) & 729 MV_U3D_EPXCR_EP_FLUSH) { 730 /* 731 * EP_FLUSH bit should be cleared to indicate this 732 * operation is complete 733 */ 734 if (loops == 0) { 735 dev_dbg(u3d->dev, 736 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num, 737 direction ? "in" : "out"); 738 return; 739 } 740 loops--; 741 udelay(LOOPS_USEC); 742 } 743 } else { /* EP_DIR_IN */ 744 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 745 tmp |= MV_U3D_EPXCR_EP_FLUSH; 746 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 747 748 /* Wait until flushing completed */ 749 loops = LOOPS(MV_U3D_FLUSH_TIMEOUT); 750 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) & 751 MV_U3D_EPXCR_EP_FLUSH) { 752 /* 753 * EP_FLUSH bit should be cleared to indicate this 754 * operation is complete 755 */ 756 if (loops == 0) { 757 dev_dbg(u3d->dev, 758 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num, 759 direction ? "in" : "out"); 760 return; 761 } 762 loops--; 763 udelay(LOOPS_USEC); 764 } 765 } 766 } 767 768 /* queues (submits) an I/O request to an endpoint */ 769 static int 770 mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 771 { 772 struct mv_u3d_ep *ep; 773 struct mv_u3d_req *req; 774 struct mv_u3d *u3d; 775 unsigned long flags; 776 int is_first_req = 0; 777 778 if (unlikely(!_ep || !_req)) 779 return -EINVAL; 780 781 ep = container_of(_ep, struct mv_u3d_ep, ep); 782 u3d = ep->u3d; 783 784 req = container_of(_req, struct mv_u3d_req, req); 785 786 if (!ep->ep_num 787 && u3d->ep0_state == MV_U3D_STATUS_STAGE 788 && !_req->length) { 789 dev_dbg(u3d->dev, "ep0 status stage\n"); 790 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP; 791 return 0; 792 } 793 794 dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n", 795 __func__, _ep->name, req); 796 797 /* catch various bogus parameters */ 798 if (!req->req.complete || !req->req.buf 799 || !list_empty(&req->queue)) { 800 dev_err(u3d->dev, 801 "%s, bad params, _req: 0x%p," 802 "req->req.complete: 0x%p, req->req.buf: 0x%p," 803 "list_empty: 0x%x\n", 804 __func__, _req, 805 req->req.complete, req->req.buf, 806 list_empty(&req->queue)); 807 return -EINVAL; 808 } 809 if (unlikely(!ep->ep.desc)) { 810 dev_err(u3d->dev, "%s, bad ep\n", __func__); 811 return -EINVAL; 812 } 813 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 814 if (req->req.length > ep->ep.maxpacket) 815 return -EMSGSIZE; 816 } 817 818 if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) { 819 dev_err(u3d->dev, 820 "bad params of driver/speed\n"); 821 return -ESHUTDOWN; 822 } 823 824 req->ep = ep; 825 826 /* Software list handles usb request. */ 827 spin_lock_irqsave(&ep->req_lock, flags); 828 is_first_req = list_empty(&ep->req_list); 829 list_add_tail(&req->list, &ep->req_list); 830 spin_unlock_irqrestore(&ep->req_lock, flags); 831 if (!is_first_req) { 832 dev_dbg(u3d->dev, "list is not empty\n"); 833 return 0; 834 } 835 836 dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n"); 837 spin_lock_irqsave(&u3d->lock, flags); 838 mv_u3d_start_queue(ep); 839 spin_unlock_irqrestore(&u3d->lock, flags); 840 return 0; 841 } 842 843 /* dequeues (cancels, unlinks) an I/O request from an endpoint */ 844 static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 845 { 846 struct mv_u3d_ep *ep; 847 struct mv_u3d_req *req; 848 struct mv_u3d *u3d; 849 struct mv_u3d_ep_context *ep_context; 850 struct mv_u3d_req *next_req; 851 852 unsigned long flags; 853 int ret = 0; 854 855 if (!_ep || !_req) 856 return -EINVAL; 857 858 ep = container_of(_ep, struct mv_u3d_ep, ep); 859 u3d = ep->u3d; 860 861 spin_lock_irqsave(&ep->u3d->lock, flags); 862 863 /* make sure it's actually queued on this endpoint */ 864 list_for_each_entry(req, &ep->queue, queue) { 865 if (&req->req == _req) 866 break; 867 } 868 if (&req->req != _req) { 869 ret = -EINVAL; 870 goto out; 871 } 872 873 /* The request is in progress, or completed but not dequeued */ 874 if (ep->queue.next == &req->queue) { 875 _req->status = -ECONNRESET; 876 mv_u3d_ep_fifo_flush(_ep); 877 878 /* The request isn't the last request in this ep queue */ 879 if (req->queue.next != &ep->queue) { 880 dev_dbg(u3d->dev, 881 "it is the last request in this ep queue\n"); 882 ep_context = ep->ep_context; 883 next_req = list_entry(req->queue.next, 884 struct mv_u3d_req, queue); 885 886 /* Point first TRB of next request to the EP context. */ 887 iowrite32((unsigned long) next_req->trb_head, 888 &ep_context->trb_addr_lo); 889 } else { 890 struct mv_u3d_ep_context *ep_context; 891 ep_context = ep->ep_context; 892 ep_context->trb_addr_lo = 0; 893 ep_context->trb_addr_hi = 0; 894 } 895 896 } else 897 WARN_ON(1); 898 899 mv_u3d_done(ep, req, -ECONNRESET); 900 901 /* remove the req from the ep req list */ 902 if (!list_empty(&ep->req_list)) { 903 struct mv_u3d_req *curr_req; 904 curr_req = list_entry(ep->req_list.next, 905 struct mv_u3d_req, list); 906 if (curr_req == req) { 907 list_del_init(&req->list); 908 ep->processing = 0; 909 } 910 } 911 912 out: 913 spin_unlock_irqrestore(&ep->u3d->lock, flags); 914 return ret; 915 } 916 917 static void 918 mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall) 919 { 920 u32 tmp; 921 struct mv_u3d_ep *ep = u3d->eps; 922 923 dev_dbg(u3d->dev, "%s\n", __func__); 924 if (direction == MV_U3D_EP_DIR_OUT) { 925 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 926 if (stall) 927 tmp |= MV_U3D_EPXCR_EP_HALT; 928 else 929 tmp &= ~MV_U3D_EPXCR_EP_HALT; 930 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); 931 } else { 932 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 933 if (stall) 934 tmp |= MV_U3D_EPXCR_EP_HALT; 935 else 936 tmp &= ~MV_U3D_EPXCR_EP_HALT; 937 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); 938 } 939 } 940 941 static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) 942 { 943 struct mv_u3d_ep *ep; 944 unsigned long flags; 945 int status = 0; 946 struct mv_u3d *u3d; 947 948 ep = container_of(_ep, struct mv_u3d_ep, ep); 949 u3d = ep->u3d; 950 if (!ep->ep.desc) { 951 status = -EINVAL; 952 goto out; 953 } 954 955 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 956 status = -EOPNOTSUPP; 957 goto out; 958 } 959 960 /* 961 * Attempt to halt IN ep will fail if any transfer requests 962 * are still queue 963 */ 964 if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN) 965 && !list_empty(&ep->queue)) { 966 status = -EAGAIN; 967 goto out; 968 } 969 970 spin_lock_irqsave(&ep->u3d->lock, flags); 971 mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt); 972 if (halt && wedge) 973 ep->wedge = 1; 974 else if (!halt) 975 ep->wedge = 0; 976 spin_unlock_irqrestore(&ep->u3d->lock, flags); 977 978 if (ep->ep_num == 0) 979 u3d->ep0_dir = MV_U3D_EP_DIR_OUT; 980 out: 981 return status; 982 } 983 984 static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt) 985 { 986 return mv_u3d_ep_set_halt_wedge(_ep, halt, 0); 987 } 988 989 static int mv_u3d_ep_set_wedge(struct usb_ep *_ep) 990 { 991 return mv_u3d_ep_set_halt_wedge(_ep, 1, 1); 992 } 993 994 static const struct usb_ep_ops mv_u3d_ep_ops = { 995 .enable = mv_u3d_ep_enable, 996 .disable = mv_u3d_ep_disable, 997 998 .alloc_request = mv_u3d_alloc_request, 999 .free_request = mv_u3d_free_request, 1000 1001 .queue = mv_u3d_ep_queue, 1002 .dequeue = mv_u3d_ep_dequeue, 1003 1004 .set_wedge = mv_u3d_ep_set_wedge, 1005 .set_halt = mv_u3d_ep_set_halt, 1006 .fifo_flush = mv_u3d_ep_fifo_flush, 1007 }; 1008 1009 static void mv_u3d_controller_stop(struct mv_u3d *u3d) 1010 { 1011 u32 tmp; 1012 1013 if (!u3d->clock_gating && u3d->vbus_valid_detect) 1014 iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID, 1015 &u3d->vuc_regs->intrenable); 1016 else 1017 iowrite32(0, &u3d->vuc_regs->intrenable); 1018 iowrite32(~0x0, &u3d->vuc_regs->endcomplete); 1019 iowrite32(~0x0, &u3d->vuc_regs->trbunderrun); 1020 iowrite32(~0x0, &u3d->vuc_regs->trbcomplete); 1021 iowrite32(~0x0, &u3d->vuc_regs->linkchange); 1022 iowrite32(0x1, &u3d->vuc_regs->setuplock); 1023 1024 /* Reset the RUN bit in the command register to stop USB */ 1025 tmp = ioread32(&u3d->op_regs->usbcmd); 1026 tmp &= ~MV_U3D_CMD_RUN_STOP; 1027 iowrite32(tmp, &u3d->op_regs->usbcmd); 1028 dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n", 1029 ioread32(&u3d->op_regs->usbcmd)); 1030 } 1031 1032 static void mv_u3d_controller_start(struct mv_u3d *u3d) 1033 { 1034 u32 usbintr; 1035 u32 temp; 1036 1037 /* enable link LTSSM state machine */ 1038 temp = ioread32(&u3d->vuc_regs->ltssm); 1039 temp |= MV_U3D_LTSSM_PHY_INIT_DONE; 1040 iowrite32(temp, &u3d->vuc_regs->ltssm); 1041 1042 /* Enable interrupts */ 1043 usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR | 1044 MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE | 1045 MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP | 1046 (u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0); 1047 iowrite32(usbintr, &u3d->vuc_regs->intrenable); 1048 1049 /* Enable ctrl ep */ 1050 iowrite32(0x1, &u3d->vuc_regs->ctrlepenable); 1051 1052 /* Set the Run bit in the command register */ 1053 iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd); 1054 dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n", 1055 ioread32(&u3d->op_regs->usbcmd)); 1056 } 1057 1058 static int mv_u3d_controller_reset(struct mv_u3d *u3d) 1059 { 1060 unsigned int loops; 1061 u32 tmp; 1062 1063 /* Stop the controller */ 1064 tmp = ioread32(&u3d->op_regs->usbcmd); 1065 tmp &= ~MV_U3D_CMD_RUN_STOP; 1066 iowrite32(tmp, &u3d->op_regs->usbcmd); 1067 1068 /* Reset the controller to get default values */ 1069 iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd); 1070 1071 /* wait for reset to complete */ 1072 loops = LOOPS(MV_U3D_RESET_TIMEOUT); 1073 while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) { 1074 if (loops == 0) { 1075 dev_err(u3d->dev, 1076 "Wait for RESET completed TIMEOUT\n"); 1077 return -ETIMEDOUT; 1078 } 1079 loops--; 1080 udelay(LOOPS_USEC); 1081 } 1082 1083 /* Configure the Endpoint Context Address */ 1084 iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl); 1085 iowrite32(0, &u3d->op_regs->dcbaaph); 1086 1087 return 0; 1088 } 1089 1090 static int mv_u3d_enable(struct mv_u3d *u3d) 1091 { 1092 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); 1093 int retval; 1094 1095 if (u3d->active) 1096 return 0; 1097 1098 if (!u3d->clock_gating) { 1099 u3d->active = 1; 1100 return 0; 1101 } 1102 1103 dev_dbg(u3d->dev, "enable u3d\n"); 1104 clk_enable(u3d->clk); 1105 if (pdata->phy_init) { 1106 retval = pdata->phy_init(u3d->phy_regs); 1107 if (retval) { 1108 dev_err(u3d->dev, 1109 "init phy error %d\n", retval); 1110 clk_disable(u3d->clk); 1111 return retval; 1112 } 1113 } 1114 u3d->active = 1; 1115 1116 return 0; 1117 } 1118 1119 static void mv_u3d_disable(struct mv_u3d *u3d) 1120 { 1121 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); 1122 if (u3d->clock_gating && u3d->active) { 1123 dev_dbg(u3d->dev, "disable u3d\n"); 1124 if (pdata->phy_deinit) 1125 pdata->phy_deinit(u3d->phy_regs); 1126 clk_disable(u3d->clk); 1127 u3d->active = 0; 1128 } 1129 } 1130 1131 static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active) 1132 { 1133 struct mv_u3d *u3d; 1134 unsigned long flags; 1135 int retval = 0; 1136 1137 u3d = container_of(gadget, struct mv_u3d, gadget); 1138 1139 spin_lock_irqsave(&u3d->lock, flags); 1140 1141 u3d->vbus_active = (is_active != 0); 1142 dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n", 1143 __func__, u3d->softconnect, u3d->vbus_active); 1144 /* 1145 * 1. external VBUS detect: we can disable/enable clock on demand. 1146 * 2. UDC VBUS detect: we have to enable clock all the time. 1147 * 3. No VBUS detect: we have to enable clock all the time. 1148 */ 1149 if (u3d->driver && u3d->softconnect && u3d->vbus_active) { 1150 retval = mv_u3d_enable(u3d); 1151 if (retval == 0) { 1152 /* 1153 * after clock is disabled, we lost all the register 1154 * context. We have to re-init registers 1155 */ 1156 mv_u3d_controller_reset(u3d); 1157 mv_u3d_ep0_reset(u3d); 1158 mv_u3d_controller_start(u3d); 1159 } 1160 } else if (u3d->driver && u3d->softconnect) { 1161 if (!u3d->active) 1162 goto out; 1163 1164 /* stop all the transfer in queue*/ 1165 mv_u3d_stop_activity(u3d, u3d->driver); 1166 mv_u3d_controller_stop(u3d); 1167 mv_u3d_disable(u3d); 1168 } 1169 1170 out: 1171 spin_unlock_irqrestore(&u3d->lock, flags); 1172 return retval; 1173 } 1174 1175 /* constrain controller's VBUS power usage 1176 * This call is used by gadget drivers during SET_CONFIGURATION calls, 1177 * reporting how much power the device may consume. For example, this 1178 * could affect how quickly batteries are recharged. 1179 * 1180 * Returns zero on success, else negative errno. 1181 */ 1182 static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA) 1183 { 1184 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget); 1185 1186 u3d->power = mA; 1187 1188 return 0; 1189 } 1190 1191 static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on) 1192 { 1193 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget); 1194 unsigned long flags; 1195 int retval = 0; 1196 1197 spin_lock_irqsave(&u3d->lock, flags); 1198 1199 dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n", 1200 __func__, u3d->softconnect, u3d->vbus_active); 1201 u3d->softconnect = (is_on != 0); 1202 if (u3d->driver && u3d->softconnect && u3d->vbus_active) { 1203 retval = mv_u3d_enable(u3d); 1204 if (retval == 0) { 1205 /* 1206 * after clock is disabled, we lost all the register 1207 * context. We have to re-init registers 1208 */ 1209 mv_u3d_controller_reset(u3d); 1210 mv_u3d_ep0_reset(u3d); 1211 mv_u3d_controller_start(u3d); 1212 } 1213 } else if (u3d->driver && u3d->vbus_active) { 1214 /* stop all the transfer in queue*/ 1215 mv_u3d_stop_activity(u3d, u3d->driver); 1216 mv_u3d_controller_stop(u3d); 1217 mv_u3d_disable(u3d); 1218 } 1219 1220 spin_unlock_irqrestore(&u3d->lock, flags); 1221 1222 return retval; 1223 } 1224 1225 static int mv_u3d_start(struct usb_gadget *g, 1226 struct usb_gadget_driver *driver) 1227 { 1228 struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget); 1229 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); 1230 unsigned long flags; 1231 1232 if (u3d->driver) 1233 return -EBUSY; 1234 1235 spin_lock_irqsave(&u3d->lock, flags); 1236 1237 if (!u3d->clock_gating) { 1238 clk_enable(u3d->clk); 1239 if (pdata->phy_init) 1240 pdata->phy_init(u3d->phy_regs); 1241 } 1242 1243 /* hook up the driver ... */ 1244 driver->driver.bus = NULL; 1245 u3d->driver = driver; 1246 1247 u3d->ep0_dir = USB_DIR_OUT; 1248 1249 spin_unlock_irqrestore(&u3d->lock, flags); 1250 1251 u3d->vbus_valid_detect = 1; 1252 1253 return 0; 1254 } 1255 1256 static int mv_u3d_stop(struct usb_gadget *g) 1257 { 1258 struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget); 1259 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); 1260 unsigned long flags; 1261 1262 u3d->vbus_valid_detect = 0; 1263 spin_lock_irqsave(&u3d->lock, flags); 1264 1265 /* enable clock to access controller register */ 1266 clk_enable(u3d->clk); 1267 if (pdata->phy_init) 1268 pdata->phy_init(u3d->phy_regs); 1269 1270 mv_u3d_controller_stop(u3d); 1271 /* stop all usb activities */ 1272 u3d->gadget.speed = USB_SPEED_UNKNOWN; 1273 mv_u3d_stop_activity(u3d, NULL); 1274 mv_u3d_disable(u3d); 1275 1276 if (pdata->phy_deinit) 1277 pdata->phy_deinit(u3d->phy_regs); 1278 clk_disable(u3d->clk); 1279 1280 spin_unlock_irqrestore(&u3d->lock, flags); 1281 1282 u3d->driver = NULL; 1283 1284 return 0; 1285 } 1286 1287 /* device controller usb_gadget_ops structure */ 1288 static const struct usb_gadget_ops mv_u3d_ops = { 1289 /* notify controller that VBUS is powered or not */ 1290 .vbus_session = mv_u3d_vbus_session, 1291 1292 /* constrain controller's VBUS power usage */ 1293 .vbus_draw = mv_u3d_vbus_draw, 1294 1295 .pullup = mv_u3d_pullup, 1296 .udc_start = mv_u3d_start, 1297 .udc_stop = mv_u3d_stop, 1298 }; 1299 1300 static int mv_u3d_eps_init(struct mv_u3d *u3d) 1301 { 1302 struct mv_u3d_ep *ep; 1303 char name[14]; 1304 int i; 1305 1306 /* initialize ep0, ep0 in/out use eps[1] */ 1307 ep = &u3d->eps[1]; 1308 ep->u3d = u3d; 1309 strncpy(ep->name, "ep0", sizeof(ep->name)); 1310 ep->ep.name = ep->name; 1311 ep->ep.ops = &mv_u3d_ep_ops; 1312 ep->wedge = 0; 1313 usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE); 1314 ep->ep.caps.type_control = true; 1315 ep->ep.caps.dir_in = true; 1316 ep->ep.caps.dir_out = true; 1317 ep->ep_num = 0; 1318 ep->ep.desc = &mv_u3d_ep0_desc; 1319 INIT_LIST_HEAD(&ep->queue); 1320 INIT_LIST_HEAD(&ep->req_list); 1321 ep->ep_type = USB_ENDPOINT_XFER_CONTROL; 1322 1323 /* add ep0 ep_context */ 1324 ep->ep_context = &u3d->ep_context[1]; 1325 1326 /* initialize other endpoints */ 1327 for (i = 2; i < u3d->max_eps * 2; i++) { 1328 ep = &u3d->eps[i]; 1329 if (i & 1) { 1330 snprintf(name, sizeof(name), "ep%din", i >> 1); 1331 ep->direction = MV_U3D_EP_DIR_IN; 1332 ep->ep.caps.dir_in = true; 1333 } else { 1334 snprintf(name, sizeof(name), "ep%dout", i >> 1); 1335 ep->direction = MV_U3D_EP_DIR_OUT; 1336 ep->ep.caps.dir_out = true; 1337 } 1338 ep->u3d = u3d; 1339 strncpy(ep->name, name, sizeof(ep->name)); 1340 ep->ep.name = ep->name; 1341 1342 ep->ep.caps.type_iso = true; 1343 ep->ep.caps.type_bulk = true; 1344 ep->ep.caps.type_int = true; 1345 1346 ep->ep.ops = &mv_u3d_ep_ops; 1347 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); 1348 ep->ep_num = i / 2; 1349 1350 INIT_LIST_HEAD(&ep->queue); 1351 list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list); 1352 1353 INIT_LIST_HEAD(&ep->req_list); 1354 spin_lock_init(&ep->req_lock); 1355 ep->ep_context = &u3d->ep_context[i]; 1356 } 1357 1358 return 0; 1359 } 1360 1361 /* delete all endpoint requests, called with spinlock held */ 1362 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status) 1363 { 1364 /* endpoint fifo flush */ 1365 mv_u3d_ep_fifo_flush(&ep->ep); 1366 1367 while (!list_empty(&ep->queue)) { 1368 struct mv_u3d_req *req = NULL; 1369 req = list_entry(ep->queue.next, struct mv_u3d_req, queue); 1370 mv_u3d_done(ep, req, status); 1371 } 1372 } 1373 1374 /* stop all USB activities */ 1375 static 1376 void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver) 1377 { 1378 struct mv_u3d_ep *ep; 1379 1380 mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN); 1381 1382 list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) { 1383 mv_u3d_nuke(ep, -ESHUTDOWN); 1384 } 1385 1386 /* report disconnect; the driver is already quiesced */ 1387 if (driver) { 1388 spin_unlock(&u3d->lock); 1389 driver->disconnect(&u3d->gadget); 1390 spin_lock(&u3d->lock); 1391 } 1392 } 1393 1394 static void mv_u3d_irq_process_error(struct mv_u3d *u3d) 1395 { 1396 /* Increment the error count */ 1397 u3d->errors++; 1398 dev_err(u3d->dev, "%s\n", __func__); 1399 } 1400 1401 static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d) 1402 { 1403 u32 linkchange; 1404 1405 linkchange = ioread32(&u3d->vuc_regs->linkchange); 1406 iowrite32(linkchange, &u3d->vuc_regs->linkchange); 1407 1408 dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange); 1409 1410 if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) { 1411 dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n", 1412 ioread32(&u3d->vuc_regs->ltssmstate)); 1413 1414 u3d->usb_state = USB_STATE_DEFAULT; 1415 u3d->ep0_dir = MV_U3D_EP_DIR_OUT; 1416 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP; 1417 1418 /* set speed */ 1419 u3d->gadget.speed = USB_SPEED_SUPER; 1420 } 1421 1422 if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) { 1423 dev_dbg(u3d->dev, "link suspend\n"); 1424 u3d->resume_state = u3d->usb_state; 1425 u3d->usb_state = USB_STATE_SUSPENDED; 1426 } 1427 1428 if (linkchange & MV_U3D_LINK_CHANGE_RESUME) { 1429 dev_dbg(u3d->dev, "link resume\n"); 1430 u3d->usb_state = u3d->resume_state; 1431 u3d->resume_state = 0; 1432 } 1433 1434 if (linkchange & MV_U3D_LINK_CHANGE_WRESET) { 1435 dev_dbg(u3d->dev, "warm reset\n"); 1436 u3d->usb_state = USB_STATE_POWERED; 1437 } 1438 1439 if (linkchange & MV_U3D_LINK_CHANGE_HRESET) { 1440 dev_dbg(u3d->dev, "hot reset\n"); 1441 u3d->usb_state = USB_STATE_DEFAULT; 1442 } 1443 1444 if (linkchange & MV_U3D_LINK_CHANGE_INACT) 1445 dev_dbg(u3d->dev, "inactive\n"); 1446 1447 if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0) 1448 dev_dbg(u3d->dev, "ss.disabled\n"); 1449 1450 if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) { 1451 dev_dbg(u3d->dev, "vbus invalid\n"); 1452 u3d->usb_state = USB_STATE_ATTACHED; 1453 u3d->vbus_valid_detect = 1; 1454 /* if external vbus detect is not supported, 1455 * we handle it here. 1456 */ 1457 if (!u3d->vbus) { 1458 spin_unlock(&u3d->lock); 1459 mv_u3d_vbus_session(&u3d->gadget, 0); 1460 spin_lock(&u3d->lock); 1461 } 1462 } 1463 } 1464 1465 static void mv_u3d_ch9setaddress(struct mv_u3d *u3d, 1466 struct usb_ctrlrequest *setup) 1467 { 1468 u32 tmp; 1469 1470 if (u3d->usb_state != USB_STATE_DEFAULT) { 1471 dev_err(u3d->dev, 1472 "%s, cannot setaddr in this state (%d)\n", 1473 __func__, u3d->usb_state); 1474 goto err; 1475 } 1476 1477 u3d->dev_addr = (u8)setup->wValue; 1478 1479 dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr); 1480 1481 if (u3d->dev_addr > 127) { 1482 dev_err(u3d->dev, 1483 "%s, u3d address is wrong (out of range)\n", __func__); 1484 u3d->dev_addr = 0; 1485 goto err; 1486 } 1487 1488 /* update usb state */ 1489 u3d->usb_state = USB_STATE_ADDRESS; 1490 1491 /* set the new address */ 1492 tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr); 1493 tmp &= ~0x7F; 1494 tmp |= (u32)u3d->dev_addr; 1495 iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr); 1496 1497 return; 1498 err: 1499 mv_u3d_ep0_stall(u3d); 1500 } 1501 1502 static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup) 1503 { 1504 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) 1505 if (setup->bRequest == USB_REQ_SET_CONFIGURATION) 1506 return 1; 1507 1508 return 0; 1509 } 1510 1511 static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num, 1512 struct usb_ctrlrequest *setup) 1513 __releases(&u3c->lock) 1514 __acquires(&u3c->lock) 1515 { 1516 bool delegate = false; 1517 1518 mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN); 1519 1520 dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1521 setup->bRequestType, setup->bRequest, 1522 setup->wValue, setup->wIndex, setup->wLength); 1523 1524 /* We process some stardard setup requests here */ 1525 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1526 switch (setup->bRequest) { 1527 case USB_REQ_GET_STATUS: 1528 delegate = true; 1529 break; 1530 1531 case USB_REQ_SET_ADDRESS: 1532 mv_u3d_ch9setaddress(u3d, setup); 1533 break; 1534 1535 case USB_REQ_CLEAR_FEATURE: 1536 delegate = true; 1537 break; 1538 1539 case USB_REQ_SET_FEATURE: 1540 delegate = true; 1541 break; 1542 1543 default: 1544 delegate = true; 1545 } 1546 } else 1547 delegate = true; 1548 1549 /* delegate USB standard requests to the gadget driver */ 1550 if (delegate) { 1551 /* USB requests handled by gadget */ 1552 if (setup->wLength) { 1553 /* DATA phase from gadget, STATUS phase from u3d */ 1554 u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN) 1555 ? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT; 1556 spin_unlock(&u3d->lock); 1557 if (u3d->driver->setup(&u3d->gadget, 1558 &u3d->local_setup_buff) < 0) { 1559 dev_err(u3d->dev, "setup error!\n"); 1560 mv_u3d_ep0_stall(u3d); 1561 } 1562 spin_lock(&u3d->lock); 1563 } else { 1564 /* no DATA phase, STATUS phase from gadget */ 1565 u3d->ep0_dir = MV_U3D_EP_DIR_IN; 1566 u3d->ep0_state = MV_U3D_STATUS_STAGE; 1567 spin_unlock(&u3d->lock); 1568 if (u3d->driver->setup(&u3d->gadget, 1569 &u3d->local_setup_buff) < 0) 1570 mv_u3d_ep0_stall(u3d); 1571 spin_lock(&u3d->lock); 1572 } 1573 1574 if (mv_u3d_is_set_configuration(setup)) { 1575 dev_dbg(u3d->dev, "u3d configured\n"); 1576 u3d->usb_state = USB_STATE_CONFIGURED; 1577 } 1578 } 1579 } 1580 1581 static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr) 1582 { 1583 struct mv_u3d_ep_context *epcontext; 1584 1585 epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN]; 1586 1587 /* Copy the setup packet to local buffer */ 1588 memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8); 1589 } 1590 1591 static void mv_u3d_irq_process_setup(struct mv_u3d *u3d) 1592 { 1593 u32 tmp, i; 1594 /* Process all Setup packet received interrupts */ 1595 tmp = ioread32(&u3d->vuc_regs->setuplock); 1596 if (tmp) { 1597 for (i = 0; i < u3d->max_eps; i++) { 1598 if (tmp & (1 << i)) { 1599 mv_u3d_get_setup_data(u3d, i, 1600 (u8 *)(&u3d->local_setup_buff)); 1601 mv_u3d_handle_setup_packet(u3d, i, 1602 &u3d->local_setup_buff); 1603 } 1604 } 1605 } 1606 1607 iowrite32(tmp, &u3d->vuc_regs->setuplock); 1608 } 1609 1610 static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d) 1611 { 1612 u32 tmp, bit_pos; 1613 int i, ep_num = 0, direction = 0; 1614 struct mv_u3d_ep *curr_ep; 1615 struct mv_u3d_req *curr_req, *temp_req; 1616 int status; 1617 1618 tmp = ioread32(&u3d->vuc_regs->endcomplete); 1619 1620 dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp); 1621 if (!tmp) 1622 return; 1623 iowrite32(tmp, &u3d->vuc_regs->endcomplete); 1624 1625 for (i = 0; i < u3d->max_eps * 2; i++) { 1626 ep_num = i >> 1; 1627 direction = i % 2; 1628 1629 bit_pos = 1 << (ep_num + 16 * direction); 1630 1631 if (!(bit_pos & tmp)) 1632 continue; 1633 1634 if (i == 0) 1635 curr_ep = &u3d->eps[1]; 1636 else 1637 curr_ep = &u3d->eps[i]; 1638 1639 /* remove req out of ep request list after completion */ 1640 dev_dbg(u3d->dev, "tr comp: check req_list\n"); 1641 spin_lock(&curr_ep->req_lock); 1642 if (!list_empty(&curr_ep->req_list)) { 1643 struct mv_u3d_req *req; 1644 req = list_entry(curr_ep->req_list.next, 1645 struct mv_u3d_req, list); 1646 list_del_init(&req->list); 1647 curr_ep->processing = 0; 1648 } 1649 spin_unlock(&curr_ep->req_lock); 1650 1651 /* process the req queue until an uncomplete request */ 1652 list_for_each_entry_safe(curr_req, temp_req, 1653 &curr_ep->queue, queue) { 1654 status = mv_u3d_process_ep_req(u3d, i, curr_req); 1655 if (status) 1656 break; 1657 /* write back status to req */ 1658 curr_req->req.status = status; 1659 1660 /* ep0 request completion */ 1661 if (ep_num == 0) { 1662 mv_u3d_done(curr_ep, curr_req, 0); 1663 break; 1664 } else { 1665 mv_u3d_done(curr_ep, curr_req, status); 1666 } 1667 } 1668 1669 dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n"); 1670 mv_u3d_start_queue(curr_ep); 1671 } 1672 } 1673 1674 static irqreturn_t mv_u3d_irq(int irq, void *dev) 1675 { 1676 struct mv_u3d *u3d = (struct mv_u3d *)dev; 1677 u32 status, intr; 1678 u32 bridgesetting; 1679 u32 trbunderrun; 1680 1681 spin_lock(&u3d->lock); 1682 1683 status = ioread32(&u3d->vuc_regs->intrcause); 1684 intr = ioread32(&u3d->vuc_regs->intrenable); 1685 status &= intr; 1686 1687 if (status == 0) { 1688 spin_unlock(&u3d->lock); 1689 dev_err(u3d->dev, "irq error!\n"); 1690 return IRQ_NONE; 1691 } 1692 1693 if (status & MV_U3D_USBINT_VBUS_VALID) { 1694 bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting); 1695 if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) { 1696 /* write vbus valid bit of bridge setting to clear */ 1697 bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID; 1698 iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting); 1699 dev_dbg(u3d->dev, "vbus valid\n"); 1700 1701 u3d->usb_state = USB_STATE_POWERED; 1702 u3d->vbus_valid_detect = 0; 1703 /* if external vbus detect is not supported, 1704 * we handle it here. 1705 */ 1706 if (!u3d->vbus) { 1707 spin_unlock(&u3d->lock); 1708 mv_u3d_vbus_session(&u3d->gadget, 1); 1709 spin_lock(&u3d->lock); 1710 } 1711 } else 1712 dev_err(u3d->dev, "vbus bit is not set\n"); 1713 } 1714 1715 /* RX data is already in the 16KB FIFO.*/ 1716 if (status & MV_U3D_USBINT_UNDER_RUN) { 1717 trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun); 1718 dev_err(u3d->dev, "under run, ep%d\n", trbunderrun); 1719 iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun); 1720 mv_u3d_irq_process_error(u3d); 1721 } 1722 1723 if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) { 1724 /* write one to clear */ 1725 iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR 1726 | MV_U3D_USBINT_TXDESC_ERR), 1727 &u3d->vuc_regs->intrcause); 1728 dev_err(u3d->dev, "desc err 0x%x\n", status); 1729 mv_u3d_irq_process_error(u3d); 1730 } 1731 1732 if (status & MV_U3D_USBINT_LINK_CHG) 1733 mv_u3d_irq_process_link_change(u3d); 1734 1735 if (status & MV_U3D_USBINT_TX_COMPLETE) 1736 mv_u3d_irq_process_tr_complete(u3d); 1737 1738 if (status & MV_U3D_USBINT_RX_COMPLETE) 1739 mv_u3d_irq_process_tr_complete(u3d); 1740 1741 if (status & MV_U3D_USBINT_SETUP) 1742 mv_u3d_irq_process_setup(u3d); 1743 1744 spin_unlock(&u3d->lock); 1745 return IRQ_HANDLED; 1746 } 1747 1748 static int mv_u3d_remove(struct platform_device *dev) 1749 { 1750 struct mv_u3d *u3d = platform_get_drvdata(dev); 1751 1752 BUG_ON(u3d == NULL); 1753 1754 usb_del_gadget_udc(&u3d->gadget); 1755 1756 /* free memory allocated in probe */ 1757 dma_pool_destroy(u3d->trb_pool); 1758 1759 if (u3d->ep_context) 1760 dma_free_coherent(&dev->dev, u3d->ep_context_size, 1761 u3d->ep_context, u3d->ep_context_dma); 1762 1763 kfree(u3d->eps); 1764 1765 if (u3d->irq) 1766 free_irq(u3d->irq, u3d); 1767 1768 if (u3d->cap_regs) 1769 iounmap(u3d->cap_regs); 1770 u3d->cap_regs = NULL; 1771 1772 kfree(u3d->status_req); 1773 1774 clk_put(u3d->clk); 1775 1776 kfree(u3d); 1777 1778 return 0; 1779 } 1780 1781 static int mv_u3d_probe(struct platform_device *dev) 1782 { 1783 struct mv_u3d *u3d = NULL; 1784 struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev); 1785 int retval = 0; 1786 struct resource *r; 1787 size_t size; 1788 1789 if (!dev_get_platdata(&dev->dev)) { 1790 dev_err(&dev->dev, "missing platform_data\n"); 1791 retval = -ENODEV; 1792 goto err_pdata; 1793 } 1794 1795 u3d = kzalloc(sizeof(*u3d), GFP_KERNEL); 1796 if (!u3d) { 1797 retval = -ENOMEM; 1798 goto err_alloc_private; 1799 } 1800 1801 spin_lock_init(&u3d->lock); 1802 1803 platform_set_drvdata(dev, u3d); 1804 1805 u3d->dev = &dev->dev; 1806 u3d->vbus = pdata->vbus; 1807 1808 u3d->clk = clk_get(&dev->dev, NULL); 1809 if (IS_ERR(u3d->clk)) { 1810 retval = PTR_ERR(u3d->clk); 1811 goto err_get_clk; 1812 } 1813 1814 r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs"); 1815 if (!r) { 1816 dev_err(&dev->dev, "no I/O memory resource defined\n"); 1817 retval = -ENODEV; 1818 goto err_get_cap_regs; 1819 } 1820 1821 u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *) 1822 ioremap(r->start, resource_size(r)); 1823 if (!u3d->cap_regs) { 1824 dev_err(&dev->dev, "failed to map I/O memory\n"); 1825 retval = -EBUSY; 1826 goto err_map_cap_regs; 1827 } else { 1828 dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n", 1829 (unsigned long) r->start, 1830 (unsigned long) u3d->cap_regs); 1831 } 1832 1833 /* we will access controller register, so enable the u3d controller */ 1834 retval = clk_enable(u3d->clk); 1835 if (retval) { 1836 dev_err(&dev->dev, "clk_enable error %d\n", retval); 1837 goto err_u3d_enable; 1838 } 1839 1840 if (pdata->phy_init) { 1841 retval = pdata->phy_init(u3d->phy_regs); 1842 if (retval) { 1843 dev_err(&dev->dev, "init phy error %d\n", retval); 1844 clk_disable(u3d->clk); 1845 goto err_phy_init; 1846 } 1847 } 1848 1849 u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs 1850 + MV_U3D_USB3_OP_REGS_OFFSET); 1851 1852 u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs 1853 + ioread32(&u3d->cap_regs->vuoff)); 1854 1855 u3d->max_eps = 16; 1856 1857 /* 1858 * some platform will use usb to download image, it may not disconnect 1859 * usb gadget before loading kernel. So first stop u3d here. 1860 */ 1861 mv_u3d_controller_stop(u3d); 1862 iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause); 1863 1864 if (pdata->phy_deinit) 1865 pdata->phy_deinit(u3d->phy_regs); 1866 clk_disable(u3d->clk); 1867 1868 size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2; 1869 size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1) 1870 & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1); 1871 u3d->ep_context = dma_alloc_coherent(&dev->dev, size, 1872 &u3d->ep_context_dma, GFP_KERNEL); 1873 if (!u3d->ep_context) { 1874 dev_err(&dev->dev, "allocate ep context memory failed\n"); 1875 retval = -ENOMEM; 1876 goto err_alloc_ep_context; 1877 } 1878 u3d->ep_context_size = size; 1879 1880 /* create TRB dma_pool resource */ 1881 u3d->trb_pool = dma_pool_create("u3d_trb", 1882 &dev->dev, 1883 sizeof(struct mv_u3d_trb_hw), 1884 MV_U3D_TRB_ALIGNMENT, 1885 MV_U3D_DMA_BOUNDARY); 1886 1887 if (!u3d->trb_pool) { 1888 retval = -ENOMEM; 1889 goto err_alloc_trb_pool; 1890 } 1891 1892 size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2; 1893 u3d->eps = kzalloc(size, GFP_KERNEL); 1894 if (!u3d->eps) { 1895 retval = -ENOMEM; 1896 goto err_alloc_eps; 1897 } 1898 1899 /* initialize ep0 status request structure */ 1900 u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL); 1901 if (!u3d->status_req) { 1902 retval = -ENOMEM; 1903 goto err_alloc_status_req; 1904 } 1905 INIT_LIST_HEAD(&u3d->status_req->queue); 1906 1907 /* allocate a small amount of memory to get valid address */ 1908 u3d->status_req->req.buf = (char *)u3d->status_req 1909 + sizeof(struct mv_u3d_req); 1910 u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf); 1911 1912 u3d->resume_state = USB_STATE_NOTATTACHED; 1913 u3d->usb_state = USB_STATE_ATTACHED; 1914 u3d->ep0_dir = MV_U3D_EP_DIR_OUT; 1915 u3d->remote_wakeup = 0; 1916 1917 r = platform_get_resource(dev, IORESOURCE_IRQ, 0); 1918 if (!r) { 1919 dev_err(&dev->dev, "no IRQ resource defined\n"); 1920 retval = -ENODEV; 1921 goto err_get_irq; 1922 } 1923 u3d->irq = r->start; 1924 1925 /* initialize gadget structure */ 1926 u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */ 1927 u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */ 1928 INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */ 1929 u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */ 1930 1931 /* the "gadget" abstracts/virtualizes the controller */ 1932 u3d->gadget.name = driver_name; /* gadget name */ 1933 1934 mv_u3d_eps_init(u3d); 1935 1936 if (request_irq(u3d->irq, mv_u3d_irq, 1937 IRQF_SHARED, driver_name, u3d)) { 1938 u3d->irq = 0; 1939 dev_err(&dev->dev, "Request irq %d for u3d failed\n", 1940 u3d->irq); 1941 retval = -ENODEV; 1942 goto err_request_irq; 1943 } 1944 1945 /* external vbus detection */ 1946 if (u3d->vbus) { 1947 u3d->clock_gating = 1; 1948 dev_err(&dev->dev, "external vbus detection\n"); 1949 } 1950 1951 if (!u3d->clock_gating) 1952 u3d->vbus_active = 1; 1953 1954 /* enable usb3 controller vbus detection */ 1955 u3d->vbus_valid_detect = 1; 1956 1957 retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget); 1958 if (retval) 1959 goto err_unregister; 1960 1961 dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n", 1962 u3d->clock_gating ? "with" : "without"); 1963 1964 return 0; 1965 1966 err_unregister: 1967 free_irq(u3d->irq, u3d); 1968 err_get_irq: 1969 err_request_irq: 1970 kfree(u3d->status_req); 1971 err_alloc_status_req: 1972 kfree(u3d->eps); 1973 err_alloc_eps: 1974 dma_pool_destroy(u3d->trb_pool); 1975 err_alloc_trb_pool: 1976 dma_free_coherent(&dev->dev, u3d->ep_context_size, 1977 u3d->ep_context, u3d->ep_context_dma); 1978 err_alloc_ep_context: 1979 err_phy_init: 1980 err_u3d_enable: 1981 iounmap(u3d->cap_regs); 1982 err_map_cap_regs: 1983 err_get_cap_regs: 1984 clk_put(u3d->clk); 1985 err_get_clk: 1986 kfree(u3d); 1987 err_alloc_private: 1988 err_pdata: 1989 return retval; 1990 } 1991 1992 #ifdef CONFIG_PM_SLEEP 1993 static int mv_u3d_suspend(struct device *dev) 1994 { 1995 struct mv_u3d *u3d = dev_get_drvdata(dev); 1996 1997 /* 1998 * only cable is unplugged, usb can suspend. 1999 * So do not care about clock_gating == 1, it is handled by 2000 * vbus session. 2001 */ 2002 if (!u3d->clock_gating) { 2003 mv_u3d_controller_stop(u3d); 2004 2005 spin_lock_irq(&u3d->lock); 2006 /* stop all usb activities */ 2007 mv_u3d_stop_activity(u3d, u3d->driver); 2008 spin_unlock_irq(&u3d->lock); 2009 2010 mv_u3d_disable(u3d); 2011 } 2012 2013 return 0; 2014 } 2015 2016 static int mv_u3d_resume(struct device *dev) 2017 { 2018 struct mv_u3d *u3d = dev_get_drvdata(dev); 2019 int retval; 2020 2021 if (!u3d->clock_gating) { 2022 retval = mv_u3d_enable(u3d); 2023 if (retval) 2024 return retval; 2025 2026 if (u3d->driver && u3d->softconnect) { 2027 mv_u3d_controller_reset(u3d); 2028 mv_u3d_ep0_reset(u3d); 2029 mv_u3d_controller_start(u3d); 2030 } 2031 } 2032 2033 return 0; 2034 } 2035 #endif 2036 2037 static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume); 2038 2039 static void mv_u3d_shutdown(struct platform_device *dev) 2040 { 2041 struct mv_u3d *u3d = platform_get_drvdata(dev); 2042 u32 tmp; 2043 2044 tmp = ioread32(&u3d->op_regs->usbcmd); 2045 tmp &= ~MV_U3D_CMD_RUN_STOP; 2046 iowrite32(tmp, &u3d->op_regs->usbcmd); 2047 } 2048 2049 static struct platform_driver mv_u3d_driver = { 2050 .probe = mv_u3d_probe, 2051 .remove = mv_u3d_remove, 2052 .shutdown = mv_u3d_shutdown, 2053 .driver = { 2054 .name = "mv-u3d", 2055 .pm = &mv_u3d_pm_ops, 2056 }, 2057 }; 2058 2059 module_platform_driver(mv_u3d_driver); 2060 MODULE_ALIAS("platform:mv-u3d"); 2061 MODULE_DESCRIPTION(DRIVER_DESC); 2062 MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>"); 2063 MODULE_LICENSE("GPL"); 2064