/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in the coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep *dep = dwc->eps[num];
		int fifo_number = dep->number >> 1;
		int mult = 1;
		int tmp;

		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast.
		 * So we're giving those endpoints a fifo_size which is
		 * enough for 3 full packets.
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;
	int i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
					DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETEPSTATE:
		return "Get Endpoint State";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
{
	u32 timeout = 500;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) &&
			usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC.
		 * The HWO bit is never reset.
		 */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);

static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum = dep->number;
	req->dep = dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb *trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ?
" chain" : ""); 763 764 /* Skip the LINK-TRB on ISOC */ 765 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 766 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 767 dep->free_slot++; 768 769 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 770 771 if (!req->trb) { 772 dwc3_gadget_move_request_queued(req); 773 req->trb = trb; 774 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 775 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 776 } 777 778 dep->free_slot++; 779 780 trb->size = DWC3_TRB_SIZE_LENGTH(length); 781 trb->bpl = lower_32_bits(dma); 782 trb->bph = upper_32_bits(dma); 783 784 switch (usb_endpoint_type(dep->endpoint.desc)) { 785 case USB_ENDPOINT_XFER_CONTROL: 786 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 787 break; 788 789 case USB_ENDPOINT_XFER_ISOC: 790 if (!node) 791 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 792 else 793 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 794 795 if (!req->request.no_interrupt && !chain) 796 trb->ctrl |= DWC3_TRB_CTRL_IOC; 797 break; 798 799 case USB_ENDPOINT_XFER_BULK: 800 case USB_ENDPOINT_XFER_INT: 801 trb->ctrl = DWC3_TRBCTL_NORMAL; 802 break; 803 default: 804 /* 805 * This is only possible with faulty memory because we 806 * checked it already :) 807 */ 808 BUG(); 809 } 810 811 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 812 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 813 trb->ctrl |= DWC3_TRB_CTRL_CSP; 814 } else if (last) { 815 trb->ctrl |= DWC3_TRB_CTRL_LST; 816 } 817 818 if (chain) 819 trb->ctrl |= DWC3_TRB_CTRL_CHN; 820 821 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 822 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 823 824 trb->ctrl |= DWC3_TRB_CTRL_HWO; 825 } 826 827 /* 828 * dwc3_prepare_trbs - setup TRBs from requests 829 * @dep: endpoint for which requests are being prepared 830 * @starting: true if the endpoint is idle and no requests are queued. 831 * 832 * The function goes through the requests list and sets up TRBs for the 833 * transfers. The function returns once there are no more TRBs available or 834 * it runs out of requests. 835 */ 836 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 837 { 838 struct dwc3_request *req, *n; 839 u32 trbs_left; 840 u32 max; 841 unsigned int last_one = 0; 842 843 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 844 845 /* the first request must not be queued */ 846 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 847 848 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 849 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 850 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 851 if (trbs_left > max) 852 trbs_left = max; 853 } 854 855 /* 856 * If busy & slot are equal than it is either full or empty. If we are 857 * starting to process requests then we are empty. Otherwise we are 858 * full and don't do anything 859 */ 860 if (!trbs_left) { 861 if (!starting) 862 return; 863 trbs_left = DWC3_TRB_NUM; 864 /* 865 * In case we start from scratch, we queue the ISOC requests 866 * starting from slot 1. This is done because we use ring 867 * buffer and have no LST bit to stop us. Instead, we place 868 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 869 * after the first request so we start at slot 1 and have 870 * 7 requests proceed before we hit the first IOC. 871 * Other transfer types don't use the ring buffer and are 872 * processed from the first TRB until the last one. Since we 873 * don't wrap around we have to start at the beginning. 
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_is_last(&req->list,
							&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out of requests.\n",
				dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		if (dep->flags & DWC3_EP_WEDGE)
			return 0;

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ?
"set" : "clear", 1231 dep->name); 1232 else 1233 dep->flags &= ~DWC3_EP_STALL; 1234 } 1235 1236 return ret; 1237 } 1238 1239 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1240 { 1241 struct dwc3_ep *dep = to_dwc3_ep(ep); 1242 struct dwc3 *dwc = dep->dwc; 1243 1244 unsigned long flags; 1245 1246 int ret; 1247 1248 spin_lock_irqsave(&dwc->lock, flags); 1249 1250 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1251 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1252 ret = -EINVAL; 1253 goto out; 1254 } 1255 1256 ret = __dwc3_gadget_ep_set_halt(dep, value); 1257 out: 1258 spin_unlock_irqrestore(&dwc->lock, flags); 1259 1260 return ret; 1261 } 1262 1263 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1264 { 1265 struct dwc3_ep *dep = to_dwc3_ep(ep); 1266 struct dwc3 *dwc = dep->dwc; 1267 unsigned long flags; 1268 1269 spin_lock_irqsave(&dwc->lock, flags); 1270 dep->flags |= DWC3_EP_WEDGE; 1271 spin_unlock_irqrestore(&dwc->lock, flags); 1272 1273 if (dep->number == 0 || dep->number == 1) 1274 return dwc3_gadget_ep0_set_halt(ep, 1); 1275 else 1276 return dwc3_gadget_ep_set_halt(ep, 1); 1277 } 1278 1279 /* -------------------------------------------------------------------------- */ 1280 1281 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1282 .bLength = USB_DT_ENDPOINT_SIZE, 1283 .bDescriptorType = USB_DT_ENDPOINT, 1284 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1285 }; 1286 1287 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1288 .enable = dwc3_gadget_ep0_enable, 1289 .disable = dwc3_gadget_ep0_disable, 1290 .alloc_request = dwc3_gadget_ep_alloc_request, 1291 .free_request = dwc3_gadget_ep_free_request, 1292 .queue = dwc3_gadget_ep0_queue, 1293 .dequeue = dwc3_gadget_ep_dequeue, 1294 .set_halt = dwc3_gadget_ep0_set_halt, 1295 .set_wedge = dwc3_gadget_ep_set_wedge, 1296 }; 1297 1298 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1299 .enable = dwc3_gadget_ep_enable, 1300 .disable = dwc3_gadget_ep_disable, 1301 .alloc_request = dwc3_gadget_ep_alloc_request, 1302 .free_request = dwc3_gadget_ep_free_request, 1303 .queue = dwc3_gadget_ep_queue, 1304 .dequeue = dwc3_gadget_ep_dequeue, 1305 .set_halt = dwc3_gadget_ep_set_halt, 1306 .set_wedge = dwc3_gadget_ep_set_wedge, 1307 }; 1308 1309 /* -------------------------------------------------------------------------- */ 1310 1311 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1312 { 1313 struct dwc3 *dwc = gadget_to_dwc(g); 1314 u32 reg; 1315 1316 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1317 return DWC3_DSTS_SOFFN(reg); 1318 } 1319 1320 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1321 { 1322 struct dwc3 *dwc = gadget_to_dwc(g); 1323 1324 unsigned long timeout; 1325 unsigned long flags; 1326 1327 u32 reg; 1328 1329 int ret = 0; 1330 1331 u8 link_state; 1332 u8 speed; 1333 1334 spin_lock_irqsave(&dwc->lock, flags); 1335 1336 /* 1337 * According to the Databook Remote wakeup request should 1338 * be issued only when the device is in early suspend state. 1339 * 1340 * We can check that via USB Link State bits in DSTS register. 
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;
		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;
		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ?
"connect" : "disconnect"); 1455 1456 return 0; 1457 } 1458 1459 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1460 { 1461 struct dwc3 *dwc = gadget_to_dwc(g); 1462 unsigned long flags; 1463 int ret; 1464 1465 is_on = !!is_on; 1466 1467 spin_lock_irqsave(&dwc->lock, flags); 1468 ret = dwc3_gadget_run_stop(dwc, is_on); 1469 spin_unlock_irqrestore(&dwc->lock, flags); 1470 1471 return ret; 1472 } 1473 1474 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1475 { 1476 u32 reg; 1477 1478 /* Enable all but Start and End of Frame IRQs */ 1479 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1480 DWC3_DEVTEN_EVNTOVERFLOWEN | 1481 DWC3_DEVTEN_CMDCMPLTEN | 1482 DWC3_DEVTEN_ERRTICERREN | 1483 DWC3_DEVTEN_WKUPEVTEN | 1484 DWC3_DEVTEN_ULSTCNGEN | 1485 DWC3_DEVTEN_CONNECTDONEEN | 1486 DWC3_DEVTEN_USBRSTEN | 1487 DWC3_DEVTEN_DISCONNEVTEN); 1488 1489 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1490 } 1491 1492 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1493 { 1494 /* mask all interrupts */ 1495 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1496 } 1497 1498 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1499 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1500 1501 static int dwc3_gadget_start(struct usb_gadget *g, 1502 struct usb_gadget_driver *driver) 1503 { 1504 struct dwc3 *dwc = gadget_to_dwc(g); 1505 struct dwc3_ep *dep; 1506 unsigned long flags; 1507 int ret = 0; 1508 int irq; 1509 u32 reg; 1510 1511 spin_lock_irqsave(&dwc->lock, flags); 1512 1513 if (dwc->gadget_driver) { 1514 dev_err(dwc->dev, "%s is already bound to %s\n", 1515 dwc->gadget.name, 1516 dwc->gadget_driver->driver.name); 1517 ret = -EBUSY; 1518 goto err0; 1519 } 1520 1521 dwc->gadget_driver = driver; 1522 1523 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1524 reg &= ~(DWC3_DCFG_SPEED_MASK); 1525 1526 /** 1527 * WORKAROUND: DWC3 revision < 2.20a have an issue 1528 * which would cause metastability state on Run/Stop 1529 * bit if we try to force the IP to USB2-only mode. 
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A)
		reg |= DWC3_DCFG_SUPERSPEED;
	else
		reg |= dwc->maximum_speed;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err1;
	}

	dwc3_gadget_enable_irq(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_gadget_disable_irq(dwc);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	free_irq(irq, dwc);

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver = NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame	= dwc3_gadget_get_frame,
	.wakeup		= dwc3_gadget_wakeup,
	.set_selfpowered = dwc3_gadget_set_selfpowered,
	.pullup		= dwc3_gadget_pullup,
	.udc_start	= dwc3_gadget_start,
	.udc_stop	= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
		u8 num, u32 direction)
{
	struct dwc3_ep *dep;
	u8 i;

	for (i = 0; i < num; i++) {
		u8 epnum = (i << 1) | (!!direction);

		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ?
"in" : "out"); 1648 1649 dep->endpoint.name = dep->name; 1650 dep->direction = (epnum & 1); 1651 1652 if (epnum == 0 || epnum == 1) { 1653 dep->endpoint.maxpacket = 512; 1654 dep->endpoint.maxburst = 1; 1655 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1656 if (!epnum) 1657 dwc->gadget.ep0 = &dep->endpoint; 1658 } else { 1659 int ret; 1660 1661 dep->endpoint.maxpacket = 1024; 1662 dep->endpoint.max_streams = 15; 1663 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1664 list_add_tail(&dep->endpoint.ep_list, 1665 &dwc->gadget.ep_list); 1666 1667 ret = dwc3_alloc_trb_pool(dep); 1668 if (ret) 1669 return ret; 1670 } 1671 1672 INIT_LIST_HEAD(&dep->request_list); 1673 INIT_LIST_HEAD(&dep->req_queued); 1674 } 1675 1676 return 0; 1677 } 1678 1679 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1680 { 1681 int ret; 1682 1683 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1684 1685 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1686 if (ret < 0) { 1687 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n"); 1688 return ret; 1689 } 1690 1691 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1692 if (ret < 0) { 1693 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n"); 1694 return ret; 1695 } 1696 1697 return 0; 1698 } 1699 1700 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1701 { 1702 struct dwc3_ep *dep; 1703 u8 epnum; 1704 1705 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1706 dep = dwc->eps[epnum]; 1707 if (!dep) 1708 continue; 1709 1710 dwc3_free_trb_pool(dep); 1711 1712 if (epnum != 0 && epnum != 1) 1713 list_del(&dep->endpoint.ep_list); 1714 1715 kfree(dep); 1716 } 1717 } 1718 1719 /* -------------------------------------------------------------------------- */ 1720 1721 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1722 struct dwc3_request *req, struct dwc3_trb *trb, 1723 const struct dwc3_event_depevt *event, int status) 1724 { 1725 unsigned int count; 1726 unsigned int s_pkt = 0; 1727 unsigned int trb_status; 1728 1729 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1730 /* 1731 * We continue despite the error. There is not much we 1732 * can do. If we don't clean it up we loop forever. If 1733 * we skip the TRB then it gets overwritten after a 1734 * while since we use them in a ring buffer. A BUG() 1735 * would help. Lets hope that if this occurs, someone 1736 * fixes the root cause instead of looking away :) 1737 */ 1738 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1739 dep->name, trb); 1740 count = trb->size & DWC3_TRB_SIZE_MASK; 1741 1742 if (dep->direction) { 1743 if (count) { 1744 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1745 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1746 dev_dbg(dwc->dev, "incomplete IN transfer %s\n", 1747 dep->name); 1748 /* 1749 * If missed isoc occurred and there is 1750 * no request queued then issue END 1751 * TRANSFER, so that core generates 1752 * next xfernotready and we will issue 1753 * a fresh START TRANSFER. 1754 * If there are still queued request 1755 * then wait, do not issue either END 1756 * or UPDATE TRANSFER, just attach next 1757 * request in request_list during 1758 * giveback.If any future queued request 1759 * is successfully transferred then we 1760 * will issue UPDATE TRANSFER for all 1761 * request in the request_list. 
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb *trb;
	unsigned int slot;
	unsigned int i;
	int ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 0;
	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned status = 0;
	int clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32 reg;
		int i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	if (!dep->resource_index)
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command.
	 * Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is:
	 *
	 * - Issue EndTransfer WITH CMDIOC bit set
	 * - Wait 100us
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;
	udelay(100);
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
}

static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	if (suspend)
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
	else
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
}

static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	if (suspend)
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
	else
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify the gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. That flag gets set whenever we have an XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	/* Recent versions support automatic phy suspend and don't need this */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* Resume PHYs */
		dwc3_gadget_usb2_phy_suspend(dwc, false);
		dwc3_gadget_usb3_phy_suspend(dwc, false);
	}

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
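
/*
 * Editor's note (illustrative): the setup_packet_pending flag used by the
 * <1.88a workaround above is maintained by the EP0 handling code (ep0.c); the
 * exact call sites live outside this file, so this is only a sketch of the
 * idea described in the comment:
 *
 *	- XferNotReady on EP0:	dwc->setup_packet_pending = true;
 *	- XferComplete on EP0:	dwc->setup_packet_pending = false;
 */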

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS, though it is not entirely clear why
	 * that is needed. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
{
	switch (speed) {
	case USB_SPEED_SUPER:
		dwc3_gadget_usb2_phy_suspend(dwc, true);
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		dwc3_gadget_usb3_phy_suspend(dwc, true);
		break;
	}
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A)
			&& (speed != DWC3_DCFG_SUPERSPEED)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		/*
		 * TODO: This should be configurable. For now we use the
		 * maximum allowed HIRD threshold value of 0b1100.
		 */
		reg |= DWC3_DCTL_HIRD_THRES(12);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* Recent versions support automatic phy suspend and don't need this */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* Suspend unneeded PHY */
		dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn.
	 *
	 * In both cases the reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * TODO: take core out of low power mode when that's
	 * implemented.
	 */

	dwc->gadget_driver->resume(&dwc->gadget);
}
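
/*
 * Editor's note (illustrative): unlike dwc3_disconnect_gadget() earlier in
 * this file, the wakeup path above calls ->resume() with dwc->lock held and
 * without checking that a gadget driver (and a resume method) is actually
 * bound. A more defensive variant, mirroring the disconnect helper, could
 * look roughly like this (sketch only, not the driver's current code):
 *
 *	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
 *		spin_unlock(&dwc->lock);
 *		dwc->gadget_driver->resume(&dwc->gadget);
 *		spin_lock(&dwc->lock);
 *	}
 */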

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
	 * Hibernation mode enabled, have an issue which shows up when the
	 * device detects a host-initiated U3 exit.
	 *
	 * In that case, the device will generate a Link State Change
	 * Interrupt from U3 to RESUME which is only necessary if Hibernation
	 * is configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 revisions <1.83a have an issue in which, depending
	 * on the link partner, the USB session might go through multiple
	 * entries into and exits from low power states before a transfer
	 * takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	dwc->link_state = next;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
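
/*
 * Editor's note: the U1/U2 enable bits cleared by the <1.83a workaround above
 * are stashed in dwc->u1u2 and restored by the second half of the workaround,
 * once all queued requests on all enabled endpoints have completed (see the
 * transfer-completion path near the top of this section):
 *
 *	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 *	reg |= dwc->u1u2;
 *	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 *	dwc->u1u2 = 0;
 */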

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT: what to do with Carkit and I2C events? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++) {
		struct dwc3_event_buffer *evt;
		int left;

		evt = dwc->ev_buffs[i];
		left = evt->count;

		if (!(evt->flags & DWC3_EVENT_PENDING))
			continue;

		while (left > 0) {
			union dwc3_event event;

			event.raw = *(u32 *) (evt->buf + evt->lpos);

			dwc3_process_event_entry(dwc, &event);

			/*
			 * FIXME: we currently wrap around to the next entry
			 * assuming that all entries are 4 bytes in size.
			 * There is one entry which is 12 bytes long (a
			 * regular entry followed by 8 bytes of data). It is
			 * not yet clear how such an entry is laid out when it
			 * lands next to the buffer boundary, so worry about
			 * that once we start handling it.
			 */
			evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
			left -= 4;

			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), 4);
		}

		evt->count = 0;
		evt->flags &= ~DWC3_EVENT_PENDING;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dwc->lock);

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_process_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	spin_unlock(&dwc->lock);

	return ret;
}
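
/*
 * Editor's note (illustrative): dwc3_interrupt() is the hard-IRQ half of the
 * handler; it only snapshots GEVNTCOUNT and marks the event buffer pending,
 * while dwc3_thread_interrupt() drains the events with the lock held. The
 * pair is registered from the gadget start path elsewhere in this file,
 * roughly along these lines (shown only for orientation; see
 * dwc3_gadget_start() for the actual call):
 *
 *	ret = request_threaded_irq(irq, dwc3_interrupt,
 *			dwc3_thread_interrupt, IRQF_SHARED, "dwc3", dwc);
 */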
2584 */ 2585 int dwc3_gadget_init(struct dwc3 *dwc) 2586 { 2587 u32 reg; 2588 int ret; 2589 2590 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2591 &dwc->ctrl_req_addr, GFP_KERNEL); 2592 if (!dwc->ctrl_req) { 2593 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2594 ret = -ENOMEM; 2595 goto err0; 2596 } 2597 2598 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2599 &dwc->ep0_trb_addr, GFP_KERNEL); 2600 if (!dwc->ep0_trb) { 2601 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2602 ret = -ENOMEM; 2603 goto err1; 2604 } 2605 2606 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2607 if (!dwc->setup_buf) { 2608 dev_err(dwc->dev, "failed to allocate setup buffer\n"); 2609 ret = -ENOMEM; 2610 goto err2; 2611 } 2612 2613 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2614 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2615 GFP_KERNEL); 2616 if (!dwc->ep0_bounce) { 2617 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2618 ret = -ENOMEM; 2619 goto err3; 2620 } 2621 2622 dwc->gadget.ops = &dwc3_gadget_ops; 2623 dwc->gadget.max_speed = USB_SPEED_SUPER; 2624 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2625 dwc->gadget.sg_supported = true; 2626 dwc->gadget.name = "dwc3-gadget"; 2627 2628 /* 2629 * REVISIT: Here we should clear all pending IRQs to be 2630 * sure we're starting from a well known location. 2631 */ 2632 2633 ret = dwc3_gadget_init_endpoints(dwc); 2634 if (ret) 2635 goto err4; 2636 2637 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2638 reg |= DWC3_DCFG_LPM_CAP; 2639 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2640 2641 /* Enable USB2 LPM and automatic phy suspend only on recent versions */ 2642 if (dwc->revision >= DWC3_REVISION_194A) { 2643 dwc3_gadget_usb2_phy_suspend(dwc, false); 2644 dwc3_gadget_usb3_phy_suspend(dwc, false); 2645 } 2646 2647 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2648 if (ret) { 2649 dev_err(dwc->dev, "failed to register udc\n"); 2650 goto err5; 2651 } 2652 2653 return 0; 2654 2655 err5: 2656 dwc3_gadget_free_endpoints(dwc); 2657 2658 err4: 2659 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2660 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2661 2662 err3: 2663 kfree(dwc->setup_buf); 2664 2665 err2: 2666 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2667 dwc->ep0_trb, dwc->ep0_trb_addr); 2668 2669 err1: 2670 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2671 dwc->ctrl_req, dwc->ctrl_req_addr); 2672 2673 err0: 2674 return ret; 2675 } 2676 2677 /* -------------------------------------------------------------------------- */ 2678 2679 void dwc3_gadget_exit(struct dwc3 *dwc) 2680 { 2681 usb_del_gadget_udc(&dwc->gadget); 2682 2683 dwc3_gadget_free_endpoints(dwc); 2684 2685 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2686 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2687 2688 kfree(dwc->setup_buf); 2689 2690 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2691 dwc->ep0_trb, dwc->ep0_trb_addr); 2692 2693 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2694 dwc->ctrl_req, dwc->ctrl_req_addr); 2695 } 2696 2697 int dwc3_gadget_prepare(struct dwc3 *dwc) 2698 { 2699 if (dwc->pullups_connected) 2700 dwc3_gadget_disable_irq(dwc); 2701 2702 return 0; 2703 } 2704 2705 void dwc3_gadget_complete(struct dwc3 *dwc) 2706 { 2707 if (dwc->pullups_connected) { 2708 dwc3_gadget_enable_irq(dwc); 2709 dwc3_gadget_run_stop(dwc, true); 2710 } 2711 } 2712 2713 int dwc3_gadget_suspend(struct dwc3 *dwc) 2714 { 2715 __dwc3_gadget_ep_disable(dwc->eps[0]); 2716 __dwc3_gadget_ep_disable(dwc->eps[1]); 2717 2718 

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret)
		goto err0;

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret)
		goto err1;

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}