1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "core.h" 34 #include "gadget.h" 35 #include "io.h" 36 37 /** 38 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 39 * @dwc: pointer to our context structure 40 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 41 * 42 * Caller should take care of locking. This function will 43 * return 0 on success or -EINVAL if wrong Test Selector 44 * is passed 45 */ 46 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 47 { 48 u32 reg; 49 50 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 51 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 52 53 switch (mode) { 54 case TEST_J: 55 case TEST_K: 56 case TEST_SE0_NAK: 57 case TEST_PACKET: 58 case TEST_FORCE_EN: 59 reg |= mode << 1; 60 break; 61 default: 62 return -EINVAL; 63 } 64 65 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 66 67 return 0; 68 } 69 70 /** 71 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 72 * @dwc: pointer to our context structure 73 * @state: the state to put link into 74 * 75 * Caller should take care of locking. This function will 76 * return 0 on success or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 79 { 80 int retries = 10000; 81 u32 reg; 82 83 /* 84 * Wait until device controller is ready. Only applies to 1.94a and 85 * later RTL. 
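	 * (The loop below polls DSTS.DCNRD in 5 us steps, i.e. for up to
	 * roughly 50 ms in the worst case, before giving up.)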
86 */ 87 if (dwc->revision >= DWC3_REVISION_194A) { 88 while (--retries) { 89 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 90 if (reg & DWC3_DSTS_DCNRD) 91 udelay(5); 92 else 93 break; 94 } 95 96 if (retries <= 0) 97 return -ETIMEDOUT; 98 } 99 100 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 101 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 102 103 /* set requested state */ 104 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 105 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 106 107 /* 108 * The following code is racy when called from dwc3_gadget_wakeup, 109 * and is not needed, at least on newer versions 110 */ 111 if (dwc->revision >= DWC3_REVISION_194A) 112 return 0; 113 114 /* wait for a change in DSTS */ 115 retries = 10000; 116 while (--retries) { 117 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 118 119 if (DWC3_DSTS_USBLNKST(reg) == state) 120 return 0; 121 122 udelay(5); 123 } 124 125 dev_vdbg(dwc->dev, "link state change request timed out\n"); 126 127 return -ETIMEDOUT; 128 } 129 130 /** 131 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 132 * @dwc: pointer to our context structure 133 * 134 * This function will a best effort FIFO allocation in order 135 * to improve FIFO usage and throughput, while still allowing 136 * us to enable as many endpoints as possible. 137 * 138 * Keep in mind that this operation will be highly dependent 139 * on the configured size for RAM1 - which contains TxFifo -, 140 * the amount of endpoints enabled on coreConsultant tool, and 141 * the width of the Master Bus. 142 * 143 * In the ideal world, we would always be able to satisfy the 144 * following equation: 145 * 146 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 147 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 148 * 149 * Unfortunately, due to many variables that's not always the case. 150 */ 151 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 152 { 153 int last_fifo_depth = 0; 154 int ram1_depth; 155 int fifo_size; 156 int mdwidth; 157 int num; 158 159 if (!dwc->needs_fifo_resize) 160 return 0; 161 162 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); 163 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 164 165 /* MDWIDTH is represented in bits, we need it in bytes */ 166 mdwidth >>= 3; 167 168 /* 169 * FIXME For now we will only allocate 1 wMaxPacketSize space 170 * for each enabled endpoint, later patches will come to 171 * improve this algorithm so that we better use the internal 172 * FIFO space 173 */ 174 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) { 175 struct dwc3_ep *dep = dwc->eps[num]; 176 int fifo_number = dep->number >> 1; 177 int mult = 1; 178 int tmp; 179 180 if (!(dep->number & 1)) 181 continue; 182 183 if (!(dep->flags & DWC3_EP_ENABLED)) 184 continue; 185 186 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 187 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 188 mult = 3; 189 190 /* 191 * REVISIT: the following assumes we will always have enough 192 * space available on the FIFO RAM for all possible use cases. 193 * Make sure that's true somehow and change FIFO allocation 194 * accordingly. 195 * 196 * If we have Bulk or Isochronous endpoints, we want 197 * them to be able to be very, very fast. 
So we're giving 198 * those endpoints a fifo_size which is enough for 3 full 199 * packets 200 */ 201 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 202 tmp += mdwidth; 203 204 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 205 206 fifo_size |= (last_fifo_depth << 16); 207 208 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n", 209 dep->name, last_fifo_depth, fifo_size & 0xffff); 210 211 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number), 212 fifo_size); 213 214 last_fifo_depth += (fifo_size & 0xffff); 215 } 216 217 return 0; 218 } 219 220 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 221 int status) 222 { 223 struct dwc3 *dwc = dep->dwc; 224 int i; 225 226 if (req->queued) { 227 i = 0; 228 do { 229 dep->busy_slot++; 230 /* 231 * Skip LINK TRB. We can't use req->trb and check for 232 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 233 * just completed (not the LINK TRB). 234 */ 235 if (((dep->busy_slot & DWC3_TRB_MASK) == 236 DWC3_TRB_NUM- 1) && 237 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 238 dep->busy_slot++; 239 } while(++i < req->request.num_mapped_sgs); 240 req->queued = false; 241 } 242 list_del(&req->list); 243 req->trb = NULL; 244 245 if (req->request.status == -EINPROGRESS) 246 req->request.status = status; 247 248 if (dwc->ep0_bounced && dep->number == 0) 249 dwc->ep0_bounced = false; 250 else 251 usb_gadget_unmap_request(&dwc->gadget, &req->request, 252 req->direction); 253 254 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 255 req, dep->name, req->request.actual, 256 req->request.length, status); 257 258 spin_unlock(&dwc->lock); 259 req->request.complete(&dep->endpoint, &req->request); 260 spin_lock(&dwc->lock); 261 } 262 263 static const char *dwc3_gadget_ep_cmd_string(u8 cmd) 264 { 265 switch (cmd) { 266 case DWC3_DEPCMD_DEPSTARTCFG: 267 return "Start New Configuration"; 268 case DWC3_DEPCMD_ENDTRANSFER: 269 return "End Transfer"; 270 case DWC3_DEPCMD_UPDATETRANSFER: 271 return "Update Transfer"; 272 case DWC3_DEPCMD_STARTTRANSFER: 273 return "Start Transfer"; 274 case DWC3_DEPCMD_CLEARSTALL: 275 return "Clear Stall"; 276 case DWC3_DEPCMD_SETSTALL: 277 return "Set Stall"; 278 case DWC3_DEPCMD_GETEPSTATE: 279 return "Get Endpoint State"; 280 case DWC3_DEPCMD_SETTRANSFRESOURCE: 281 return "Set Endpoint Transfer Resource"; 282 case DWC3_DEPCMD_SETEPCONFIG: 283 return "Set Endpoint Configuration"; 284 default: 285 return "UNKNOWN command"; 286 } 287 } 288 289 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param) 290 { 291 u32 timeout = 500; 292 u32 reg; 293 294 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 295 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 296 297 do { 298 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 299 if (!(reg & DWC3_DGCMD_CMDACT)) { 300 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 301 DWC3_DGCMD_STATUS(reg)); 302 return 0; 303 } 304 305 /* 306 * We can't sleep here, because it's also called from 307 * interrupt context. 
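		 * (Hence the bounded busy-wait below: at most 500 reads of
		 * DGCMD with a 1 us delay between them.)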
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) &&
			usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret = -ENOMEM;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int
dwc3_gadget_ep_disable(struct usb_ep *ep) 669 { 670 struct dwc3_ep *dep; 671 struct dwc3 *dwc; 672 unsigned long flags; 673 int ret; 674 675 if (!ep) { 676 pr_debug("dwc3: invalid parameters\n"); 677 return -EINVAL; 678 } 679 680 dep = to_dwc3_ep(ep); 681 dwc = dep->dwc; 682 683 if (!(dep->flags & DWC3_EP_ENABLED)) { 684 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 685 dep->name); 686 return 0; 687 } 688 689 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 690 dep->number >> 1, 691 (dep->number & 1) ? "in" : "out"); 692 693 spin_lock_irqsave(&dwc->lock, flags); 694 ret = __dwc3_gadget_ep_disable(dep); 695 spin_unlock_irqrestore(&dwc->lock, flags); 696 697 return ret; 698 } 699 700 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 701 gfp_t gfp_flags) 702 { 703 struct dwc3_request *req; 704 struct dwc3_ep *dep = to_dwc3_ep(ep); 705 struct dwc3 *dwc = dep->dwc; 706 707 req = kzalloc(sizeof(*req), gfp_flags); 708 if (!req) { 709 dev_err(dwc->dev, "not enough memory\n"); 710 return NULL; 711 } 712 713 req->epnum = dep->number; 714 req->dep = dep; 715 716 return &req->request; 717 } 718 719 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 720 struct usb_request *request) 721 { 722 struct dwc3_request *req = to_dwc3_request(request); 723 724 kfree(req); 725 } 726 727 /** 728 * dwc3_prepare_one_trb - setup one TRB from one request 729 * @dep: endpoint for which this request is prepared 730 * @req: dwc3_request pointer 731 */ 732 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 733 struct dwc3_request *req, dma_addr_t dma, 734 unsigned length, unsigned last, unsigned chain, unsigned node) 735 { 736 struct dwc3 *dwc = dep->dwc; 737 struct dwc3_trb *trb; 738 739 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", 740 dep->name, req, (unsigned long long) dma, 741 length, last ? " last" : "", 742 chain ? 
" chain" : ""); 743 744 /* Skip the LINK-TRB on ISOC */ 745 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 746 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 747 dep->free_slot++; 748 749 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 750 751 if (!req->trb) { 752 dwc3_gadget_move_request_queued(req); 753 req->trb = trb; 754 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 755 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 756 } 757 758 dep->free_slot++; 759 760 trb->size = DWC3_TRB_SIZE_LENGTH(length); 761 trb->bpl = lower_32_bits(dma); 762 trb->bph = upper_32_bits(dma); 763 764 switch (usb_endpoint_type(dep->endpoint.desc)) { 765 case USB_ENDPOINT_XFER_CONTROL: 766 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 767 break; 768 769 case USB_ENDPOINT_XFER_ISOC: 770 if (!node) 771 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 772 else 773 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 774 775 if (!req->request.no_interrupt && !chain) 776 trb->ctrl |= DWC3_TRB_CTRL_IOC; 777 break; 778 779 case USB_ENDPOINT_XFER_BULK: 780 case USB_ENDPOINT_XFER_INT: 781 trb->ctrl = DWC3_TRBCTL_NORMAL; 782 break; 783 default: 784 /* 785 * This is only possible with faulty memory because we 786 * checked it already :) 787 */ 788 BUG(); 789 } 790 791 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 792 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 793 trb->ctrl |= DWC3_TRB_CTRL_CSP; 794 } else if (last) { 795 trb->ctrl |= DWC3_TRB_CTRL_LST; 796 } 797 798 if (chain) 799 trb->ctrl |= DWC3_TRB_CTRL_CHN; 800 801 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 802 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 803 804 trb->ctrl |= DWC3_TRB_CTRL_HWO; 805 } 806 807 /* 808 * dwc3_prepare_trbs - setup TRBs from requests 809 * @dep: endpoint for which requests are being prepared 810 * @starting: true if the endpoint is idle and no requests are queued. 811 * 812 * The function goes through the requests list and sets up TRBs for the 813 * transfers. The function returns once there are no more TRBs available or 814 * it runs out of requests. 815 */ 816 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 817 { 818 struct dwc3_request *req, *n; 819 u32 trbs_left; 820 u32 max; 821 unsigned int last_one = 0; 822 823 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 824 825 /* the first request must not be queued */ 826 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 827 828 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 829 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 830 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 831 if (trbs_left > max) 832 trbs_left = max; 833 } 834 835 /* 836 * If busy & slot are equal than it is either full or empty. If we are 837 * starting to process requests then we are empty. Otherwise we are 838 * full and don't do anything 839 */ 840 if (!trbs_left) { 841 if (!starting) 842 return; 843 trbs_left = DWC3_TRB_NUM; 844 /* 845 * In case we start from scratch, we queue the ISOC requests 846 * starting from slot 1. This is done because we use ring 847 * buffer and have no LST bit to stop us. Instead, we place 848 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 849 * after the first request so we start at slot 1 and have 850 * 7 requests proceed before we hit the first IOC. 851 * Other transfer types don't use the ring buffer and are 852 * processed from the first TRB until the last one. Since we 853 * don't wrap around we have to start at the beginning. 
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_is_last(&req->list,
							&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
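		 * (At the moment only the first queued request is unmapped
		 * and deleted below.)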
984 */ 985 usb_gadget_unmap_request(&dwc->gadget, &req->request, 986 req->direction); 987 list_del(&req->list); 988 return ret; 989 } 990 991 dep->flags |= DWC3_EP_BUSY; 992 993 if (start_new) { 994 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 995 dep->number); 996 WARN_ON_ONCE(!dep->resource_index); 997 } 998 999 return 0; 1000 } 1001 1002 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1003 struct dwc3_ep *dep, u32 cur_uf) 1004 { 1005 u32 uf; 1006 1007 if (list_empty(&dep->request_list)) { 1008 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", 1009 dep->name); 1010 dep->flags |= DWC3_EP_PENDING_REQUEST; 1011 return; 1012 } 1013 1014 /* 4 micro frames in the future */ 1015 uf = cur_uf + dep->interval * 4; 1016 1017 __dwc3_gadget_kick_transfer(dep, uf, 1); 1018 } 1019 1020 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1021 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1022 { 1023 u32 cur_uf, mask; 1024 1025 mask = ~(dep->interval - 1); 1026 cur_uf = event->parameters & mask; 1027 1028 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1029 } 1030 1031 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1032 { 1033 struct dwc3 *dwc = dep->dwc; 1034 int ret; 1035 1036 req->request.actual = 0; 1037 req->request.status = -EINPROGRESS; 1038 req->direction = dep->direction; 1039 req->epnum = dep->number; 1040 1041 /* 1042 * We only add to our list of requests now and 1043 * start consuming the list once we get XferNotReady 1044 * IRQ. 1045 * 1046 * That way, we avoid doing anything that we don't need 1047 * to do now and defer it until the point we receive a 1048 * particular token from the Host side. 1049 * 1050 * This will also avoid Host cancelling URBs due to too 1051 * many NAKs. 1052 */ 1053 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1054 dep->direction); 1055 if (ret) 1056 return ret; 1057 1058 list_add_tail(&req->list, &dep->request_list); 1059 1060 /* 1061 * There are a few special cases: 1062 * 1063 * 1. XferNotReady with empty list of requests. We need to kick the 1064 * transfer here in that situation, otherwise we will be NAKing 1065 * forever. If we get XferNotReady before gadget driver has a 1066 * chance to queue a request, we will ACK the IRQ but won't be 1067 * able to receive the data until the next request is queued. 1068 * The following code is handling exactly that. 1069 * 1070 */ 1071 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1072 /* 1073 * If xfernotready is already elapsed and it is a case 1074 * of isoc transfer, then issue END TRANSFER, so that 1075 * you can receive xfernotready again and can have 1076 * notion of current microframe. 1077 */ 1078 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1079 if (list_empty(&dep->req_queued)) { 1080 dwc3_stop_active_transfer(dwc, dep->number); 1081 dep->flags = DWC3_EP_ENABLED; 1082 } 1083 return 0; 1084 } 1085 1086 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1087 if (ret && ret != -EBUSY) 1088 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1089 dep->name); 1090 return ret; 1091 } 1092 1093 /* 1094 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1095 * kick the transfer here after queuing a request, otherwise the 1096 * core may not see the modified TRB(s). 
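	 * (This is done below by issuing an Update Transfer command that
	 * reuses the transfer resource index saved when the transfer was
	 * started.)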
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ?
"set" : "clear", 1208 dep->name); 1209 else 1210 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1211 } 1212 1213 return ret; 1214 } 1215 1216 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1217 { 1218 struct dwc3_ep *dep = to_dwc3_ep(ep); 1219 struct dwc3 *dwc = dep->dwc; 1220 1221 unsigned long flags; 1222 1223 int ret; 1224 1225 spin_lock_irqsave(&dwc->lock, flags); 1226 1227 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1228 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1229 ret = -EINVAL; 1230 goto out; 1231 } 1232 1233 ret = __dwc3_gadget_ep_set_halt(dep, value); 1234 out: 1235 spin_unlock_irqrestore(&dwc->lock, flags); 1236 1237 return ret; 1238 } 1239 1240 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1241 { 1242 struct dwc3_ep *dep = to_dwc3_ep(ep); 1243 struct dwc3 *dwc = dep->dwc; 1244 unsigned long flags; 1245 1246 spin_lock_irqsave(&dwc->lock, flags); 1247 dep->flags |= DWC3_EP_WEDGE; 1248 spin_unlock_irqrestore(&dwc->lock, flags); 1249 1250 if (dep->number == 0 || dep->number == 1) 1251 return dwc3_gadget_ep0_set_halt(ep, 1); 1252 else 1253 return dwc3_gadget_ep_set_halt(ep, 1); 1254 } 1255 1256 /* -------------------------------------------------------------------------- */ 1257 1258 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1259 .bLength = USB_DT_ENDPOINT_SIZE, 1260 .bDescriptorType = USB_DT_ENDPOINT, 1261 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1262 }; 1263 1264 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1265 .enable = dwc3_gadget_ep0_enable, 1266 .disable = dwc3_gadget_ep0_disable, 1267 .alloc_request = dwc3_gadget_ep_alloc_request, 1268 .free_request = dwc3_gadget_ep_free_request, 1269 .queue = dwc3_gadget_ep0_queue, 1270 .dequeue = dwc3_gadget_ep_dequeue, 1271 .set_halt = dwc3_gadget_ep0_set_halt, 1272 .set_wedge = dwc3_gadget_ep_set_wedge, 1273 }; 1274 1275 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1276 .enable = dwc3_gadget_ep_enable, 1277 .disable = dwc3_gadget_ep_disable, 1278 .alloc_request = dwc3_gadget_ep_alloc_request, 1279 .free_request = dwc3_gadget_ep_free_request, 1280 .queue = dwc3_gadget_ep_queue, 1281 .dequeue = dwc3_gadget_ep_dequeue, 1282 .set_halt = dwc3_gadget_ep_set_halt, 1283 .set_wedge = dwc3_gadget_ep_set_wedge, 1284 }; 1285 1286 /* -------------------------------------------------------------------------- */ 1287 1288 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1289 { 1290 struct dwc3 *dwc = gadget_to_dwc(g); 1291 u32 reg; 1292 1293 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1294 return DWC3_DSTS_SOFFN(reg); 1295 } 1296 1297 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1298 { 1299 struct dwc3 *dwc = gadget_to_dwc(g); 1300 1301 unsigned long timeout; 1302 unsigned long flags; 1303 1304 u32 reg; 1305 1306 int ret = 0; 1307 1308 u8 link_state; 1309 u8 speed; 1310 1311 spin_lock_irqsave(&dwc->lock, flags); 1312 1313 /* 1314 * According to the Databook Remote wakeup request should 1315 * be issued only when the device is in early suspend state. 1316 * 1317 * We can check that via USB Link State bits in DSTS register. 
1318 */ 1319 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1320 1321 speed = reg & DWC3_DSTS_CONNECTSPD; 1322 if (speed == DWC3_DSTS_SUPERSPEED) { 1323 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1324 ret = -EINVAL; 1325 goto out; 1326 } 1327 1328 link_state = DWC3_DSTS_USBLNKST(reg); 1329 1330 switch (link_state) { 1331 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1332 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1333 break; 1334 default: 1335 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1336 link_state); 1337 ret = -EINVAL; 1338 goto out; 1339 } 1340 1341 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1342 if (ret < 0) { 1343 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1344 goto out; 1345 } 1346 1347 /* Recent versions do this automatically */ 1348 if (dwc->revision < DWC3_REVISION_194A) { 1349 /* write zeroes to Link Change Request */ 1350 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1351 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1352 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1353 } 1354 1355 /* poll until Link State changes to ON */ 1356 timeout = jiffies + msecs_to_jiffies(100); 1357 1358 while (!time_after(jiffies, timeout)) { 1359 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1360 1361 /* in HS, means ON */ 1362 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1363 break; 1364 } 1365 1366 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1367 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1368 ret = -EINVAL; 1369 } 1370 1371 out: 1372 spin_unlock_irqrestore(&dwc->lock, flags); 1373 1374 return ret; 1375 } 1376 1377 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1378 int is_selfpowered) 1379 { 1380 struct dwc3 *dwc = gadget_to_dwc(g); 1381 unsigned long flags; 1382 1383 spin_lock_irqsave(&dwc->lock, flags); 1384 dwc->is_selfpowered = !!is_selfpowered; 1385 spin_unlock_irqrestore(&dwc->lock, flags); 1386 1387 return 0; 1388 } 1389 1390 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) 1391 { 1392 u32 reg; 1393 u32 timeout = 500; 1394 1395 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1396 if (is_on) { 1397 if (dwc->revision <= DWC3_REVISION_187A) { 1398 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1399 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1400 } 1401 1402 if (dwc->revision >= DWC3_REVISION_194A) 1403 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1404 reg |= DWC3_DCTL_RUN_STOP; 1405 dwc->pullups_connected = true; 1406 } else { 1407 reg &= ~DWC3_DCTL_RUN_STOP; 1408 dwc->pullups_connected = false; 1409 } 1410 1411 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1412 1413 do { 1414 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1415 if (is_on) { 1416 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1417 break; 1418 } else { 1419 if (reg & DWC3_DSTS_DEVCTRLHLT) 1420 break; 1421 } 1422 timeout--; 1423 if (!timeout) 1424 return -ETIMEDOUT; 1425 udelay(1); 1426 } while (1); 1427 1428 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", 1429 dwc->gadget_driver 1430 ? dwc->gadget_driver->function : "no-function", 1431 is_on ? 
"connect" : "disconnect"); 1432 1433 return 0; 1434 } 1435 1436 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1437 { 1438 struct dwc3 *dwc = gadget_to_dwc(g); 1439 unsigned long flags; 1440 int ret; 1441 1442 is_on = !!is_on; 1443 1444 spin_lock_irqsave(&dwc->lock, flags); 1445 ret = dwc3_gadget_run_stop(dwc, is_on); 1446 spin_unlock_irqrestore(&dwc->lock, flags); 1447 1448 return ret; 1449 } 1450 1451 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1452 { 1453 u32 reg; 1454 1455 /* Enable all but Start and End of Frame IRQs */ 1456 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1457 DWC3_DEVTEN_EVNTOVERFLOWEN | 1458 DWC3_DEVTEN_CMDCMPLTEN | 1459 DWC3_DEVTEN_ERRTICERREN | 1460 DWC3_DEVTEN_WKUPEVTEN | 1461 DWC3_DEVTEN_ULSTCNGEN | 1462 DWC3_DEVTEN_CONNECTDONEEN | 1463 DWC3_DEVTEN_USBRSTEN | 1464 DWC3_DEVTEN_DISCONNEVTEN); 1465 1466 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1467 } 1468 1469 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1470 { 1471 /* mask all interrupts */ 1472 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1473 } 1474 1475 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1476 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1477 1478 static int dwc3_gadget_start(struct usb_gadget *g, 1479 struct usb_gadget_driver *driver) 1480 { 1481 struct dwc3 *dwc = gadget_to_dwc(g); 1482 struct dwc3_ep *dep; 1483 unsigned long flags; 1484 int ret = 0; 1485 int irq; 1486 u32 reg; 1487 1488 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1489 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1490 IRQF_SHARED, "dwc3", dwc); 1491 if (ret) { 1492 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1493 irq, ret); 1494 goto err0; 1495 } 1496 1497 spin_lock_irqsave(&dwc->lock, flags); 1498 1499 if (dwc->gadget_driver) { 1500 dev_err(dwc->dev, "%s is already bound to %s\n", 1501 dwc->gadget.name, 1502 dwc->gadget_driver->driver.name); 1503 ret = -EBUSY; 1504 goto err1; 1505 } 1506 1507 dwc->gadget_driver = driver; 1508 1509 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1510 reg &= ~(DWC3_DCFG_SPEED_MASK); 1511 1512 /** 1513 * WORKAROUND: DWC3 revision < 2.20a have an issue 1514 * which would cause metastability state on Run/Stop 1515 * bit if we try to force the IP to USB2-only mode. 
1516 * 1517 * Because of that, we cannot configure the IP to any 1518 * speed other than the SuperSpeed 1519 * 1520 * Refers to: 1521 * 1522 * STAR#9000525659: Clock Domain Crossing on DCTL in 1523 * USB 2.0 Mode 1524 */ 1525 if (dwc->revision < DWC3_REVISION_220A) { 1526 reg |= DWC3_DCFG_SUPERSPEED; 1527 } else { 1528 switch (dwc->maximum_speed) { 1529 case USB_SPEED_LOW: 1530 reg |= DWC3_DSTS_LOWSPEED; 1531 break; 1532 case USB_SPEED_FULL: 1533 reg |= DWC3_DSTS_FULLSPEED1; 1534 break; 1535 case USB_SPEED_HIGH: 1536 reg |= DWC3_DSTS_HIGHSPEED; 1537 break; 1538 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1539 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1540 default: 1541 reg |= DWC3_DSTS_SUPERSPEED; 1542 } 1543 } 1544 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1545 1546 dwc->start_config_issued = false; 1547 1548 /* Start with SuperSpeed Default */ 1549 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1550 1551 dep = dwc->eps[0]; 1552 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false); 1553 if (ret) { 1554 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1555 goto err2; 1556 } 1557 1558 dep = dwc->eps[1]; 1559 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false); 1560 if (ret) { 1561 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1562 goto err3; 1563 } 1564 1565 /* begin to receive SETUP packets */ 1566 dwc->ep0state = EP0_SETUP_PHASE; 1567 dwc3_ep0_out_start(dwc); 1568 1569 dwc3_gadget_enable_irq(dwc); 1570 1571 spin_unlock_irqrestore(&dwc->lock, flags); 1572 1573 return 0; 1574 1575 err3: 1576 __dwc3_gadget_ep_disable(dwc->eps[0]); 1577 1578 err2: 1579 dwc->gadget_driver = NULL; 1580 1581 err1: 1582 spin_unlock_irqrestore(&dwc->lock, flags); 1583 1584 free_irq(irq, dwc); 1585 1586 err0: 1587 return ret; 1588 } 1589 1590 static int dwc3_gadget_stop(struct usb_gadget *g, 1591 struct usb_gadget_driver *driver) 1592 { 1593 struct dwc3 *dwc = gadget_to_dwc(g); 1594 unsigned long flags; 1595 int irq; 1596 1597 spin_lock_irqsave(&dwc->lock, flags); 1598 1599 dwc3_gadget_disable_irq(dwc); 1600 __dwc3_gadget_ep_disable(dwc->eps[0]); 1601 __dwc3_gadget_ep_disable(dwc->eps[1]); 1602 1603 dwc->gadget_driver = NULL; 1604 1605 spin_unlock_irqrestore(&dwc->lock, flags); 1606 1607 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1608 free_irq(irq, dwc); 1609 1610 return 0; 1611 } 1612 1613 static const struct usb_gadget_ops dwc3_gadget_ops = { 1614 .get_frame = dwc3_gadget_get_frame, 1615 .wakeup = dwc3_gadget_wakeup, 1616 .set_selfpowered = dwc3_gadget_set_selfpowered, 1617 .pullup = dwc3_gadget_pullup, 1618 .udc_start = dwc3_gadget_start, 1619 .udc_stop = dwc3_gadget_stop, 1620 }; 1621 1622 /* -------------------------------------------------------------------------- */ 1623 1624 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1625 u8 num, u32 direction) 1626 { 1627 struct dwc3_ep *dep; 1628 u8 i; 1629 1630 for (i = 0; i < num; i++) { 1631 u8 epnum = (i << 1) | (!!direction); 1632 1633 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1634 if (!dep) { 1635 dev_err(dwc->dev, "can't allocate endpoint %d\n", 1636 epnum); 1637 return -ENOMEM; 1638 } 1639 1640 dep->dwc = dwc; 1641 dep->number = epnum; 1642 dep->direction = !!direction; 1643 dwc->eps[epnum] = dep; 1644 1645 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1646 (epnum & 1) ? 
"in" : "out"); 1647 1648 dep->endpoint.name = dep->name; 1649 1650 dev_vdbg(dwc->dev, "initializing %s\n", dep->name); 1651 1652 if (epnum == 0 || epnum == 1) { 1653 dep->endpoint.maxpacket = 512; 1654 dep->endpoint.maxburst = 1; 1655 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1656 if (!epnum) 1657 dwc->gadget.ep0 = &dep->endpoint; 1658 } else { 1659 int ret; 1660 1661 dep->endpoint.maxpacket = 1024; 1662 dep->endpoint.max_streams = 15; 1663 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1664 list_add_tail(&dep->endpoint.ep_list, 1665 &dwc->gadget.ep_list); 1666 1667 ret = dwc3_alloc_trb_pool(dep); 1668 if (ret) 1669 return ret; 1670 } 1671 1672 INIT_LIST_HEAD(&dep->request_list); 1673 INIT_LIST_HEAD(&dep->req_queued); 1674 } 1675 1676 return 0; 1677 } 1678 1679 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1680 { 1681 int ret; 1682 1683 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1684 1685 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1686 if (ret < 0) { 1687 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n"); 1688 return ret; 1689 } 1690 1691 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1692 if (ret < 0) { 1693 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n"); 1694 return ret; 1695 } 1696 1697 return 0; 1698 } 1699 1700 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1701 { 1702 struct dwc3_ep *dep; 1703 u8 epnum; 1704 1705 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1706 dep = dwc->eps[epnum]; 1707 if (!dep) 1708 continue; 1709 /* 1710 * Physical endpoints 0 and 1 are special; they form the 1711 * bi-directional USB endpoint 0. 1712 * 1713 * For those two physical endpoints, we don't allocate a TRB 1714 * pool nor do we add them the endpoints list. Due to that, we 1715 * shouldn't do these two operations otherwise we would end up 1716 * with all sorts of bugs when removing dwc3.ko. 1717 */ 1718 if (epnum != 0 && epnum != 1) { 1719 dwc3_free_trb_pool(dep); 1720 list_del(&dep->endpoint.ep_list); 1721 } 1722 1723 kfree(dep); 1724 } 1725 } 1726 1727 /* -------------------------------------------------------------------------- */ 1728 1729 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1730 struct dwc3_request *req, struct dwc3_trb *trb, 1731 const struct dwc3_event_depevt *event, int status) 1732 { 1733 unsigned int count; 1734 unsigned int s_pkt = 0; 1735 unsigned int trb_status; 1736 1737 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1738 /* 1739 * We continue despite the error. There is not much we 1740 * can do. If we don't clean it up we loop forever. If 1741 * we skip the TRB then it gets overwritten after a 1742 * while since we use them in a ring buffer. A BUG() 1743 * would help. Lets hope that if this occurs, someone 1744 * fixes the root cause instead of looking away :) 1745 */ 1746 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1747 dep->name, trb); 1748 count = trb->size & DWC3_TRB_SIZE_MASK; 1749 1750 if (dep->direction) { 1751 if (count) { 1752 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1753 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1754 dev_dbg(dwc->dev, "incomplete IN transfer %s\n", 1755 dep->name); 1756 /* 1757 * If missed isoc occurred and there is 1758 * no request queued then issue END 1759 * TRANSFER, so that core generates 1760 * next xfernotready and we will issue 1761 * a fresh START TRANSFER. 
1762 * If there are still queued request 1763 * then wait, do not issue either END 1764 * or UPDATE TRANSFER, just attach next 1765 * request in request_list during 1766 * giveback.If any future queued request 1767 * is successfully transferred then we 1768 * will issue UPDATE TRANSFER for all 1769 * request in the request_list. 1770 */ 1771 dep->flags |= DWC3_EP_MISSED_ISOC; 1772 } else { 1773 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1774 dep->name); 1775 status = -ECONNRESET; 1776 } 1777 } else { 1778 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1779 } 1780 } else { 1781 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1782 s_pkt = 1; 1783 } 1784 1785 /* 1786 * We assume here we will always receive the entire data block 1787 * which we should receive. Meaning, if we program RX to 1788 * receive 4K but we receive only 2K, we assume that's all we 1789 * should receive and we simply bounce the request back to the 1790 * gadget driver for further processing. 1791 */ 1792 req->request.actual += req->request.length - count; 1793 if (s_pkt) 1794 return 1; 1795 if ((event->status & DEPEVT_STATUS_LST) && 1796 (trb->ctrl & (DWC3_TRB_CTRL_LST | 1797 DWC3_TRB_CTRL_HWO))) 1798 return 1; 1799 if ((event->status & DEPEVT_STATUS_IOC) && 1800 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1801 return 1; 1802 return 0; 1803 } 1804 1805 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1806 const struct dwc3_event_depevt *event, int status) 1807 { 1808 struct dwc3_request *req; 1809 struct dwc3_trb *trb; 1810 unsigned int slot; 1811 unsigned int i; 1812 int ret; 1813 1814 do { 1815 req = next_request(&dep->req_queued); 1816 if (!req) { 1817 WARN_ON_ONCE(1); 1818 return 1; 1819 } 1820 i = 0; 1821 do { 1822 slot = req->start_slot + i; 1823 if ((slot == DWC3_TRB_NUM - 1) && 1824 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1825 slot++; 1826 slot %= DWC3_TRB_NUM; 1827 trb = &dep->trb_pool[slot]; 1828 1829 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1830 event, status); 1831 if (ret) 1832 break; 1833 }while (++i < req->request.num_mapped_sgs); 1834 1835 dwc3_gadget_giveback(dep, req, status); 1836 1837 if (ret) 1838 break; 1839 } while (1); 1840 1841 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1842 list_empty(&dep->req_queued)) { 1843 if (list_empty(&dep->request_list)) { 1844 /* 1845 * If there is no entry in request list then do 1846 * not issue END TRANSFER now. Just set PENDING 1847 * flag, so that END TRANSFER is issued when an 1848 * entry is added into request list. 1849 */ 1850 dep->flags = DWC3_EP_PENDING_REQUEST; 1851 } else { 1852 dwc3_stop_active_transfer(dwc, dep->number); 1853 dep->flags = DWC3_EP_ENABLED; 1854 } 1855 return 1; 1856 } 1857 1858 if ((event->status & DEPEVT_STATUS_IOC) && 1859 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1860 return 0; 1861 return 1; 1862 } 1863 1864 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1865 struct dwc3_ep *dep, const struct dwc3_event_depevt *event, 1866 int start_new) 1867 { 1868 unsigned status = 0; 1869 int clean_busy; 1870 1871 if (event->status & DEPEVT_STATUS_BUSERR) 1872 status = -ECONNRESET; 1873 1874 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1875 if (clean_busy) 1876 dep->flags &= ~DWC3_EP_BUSY; 1877 1878 /* 1879 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1880 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 
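	 * (The saved U1/U2 enable bits are only written back to DCTL once no
	 * enabled endpoint has requests left on its queue, see the loop
	 * below.)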
1881 */ 1882 if (dwc->revision < DWC3_REVISION_183A) { 1883 u32 reg; 1884 int i; 1885 1886 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1887 dep = dwc->eps[i]; 1888 1889 if (!(dep->flags & DWC3_EP_ENABLED)) 1890 continue; 1891 1892 if (!list_empty(&dep->req_queued)) 1893 return; 1894 } 1895 1896 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1897 reg |= dwc->u1u2; 1898 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1899 1900 dwc->u1u2 = 0; 1901 } 1902 } 1903 1904 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1905 const struct dwc3_event_depevt *event) 1906 { 1907 struct dwc3_ep *dep; 1908 u8 epnum = event->endpoint_number; 1909 1910 dep = dwc->eps[epnum]; 1911 1912 if (!(dep->flags & DWC3_EP_ENABLED)) 1913 return; 1914 1915 dev_vdbg(dwc->dev, "%s: %s\n", dep->name, 1916 dwc3_ep_event_string(event->endpoint_event)); 1917 1918 if (epnum == 0 || epnum == 1) { 1919 dwc3_ep0_interrupt(dwc, event); 1920 return; 1921 } 1922 1923 switch (event->endpoint_event) { 1924 case DWC3_DEPEVT_XFERCOMPLETE: 1925 dep->resource_index = 0; 1926 1927 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1928 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1929 dep->name); 1930 return; 1931 } 1932 1933 dwc3_endpoint_transfer_complete(dwc, dep, event, 1); 1934 break; 1935 case DWC3_DEPEVT_XFERINPROGRESS: 1936 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1937 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", 1938 dep->name); 1939 return; 1940 } 1941 1942 dwc3_endpoint_transfer_complete(dwc, dep, event, 0); 1943 break; 1944 case DWC3_DEPEVT_XFERNOTREADY: 1945 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1946 dwc3_gadget_start_isoc(dwc, dep, event); 1947 } else { 1948 int ret; 1949 1950 dev_vdbg(dwc->dev, "%s: reason %s\n", 1951 dep->name, event->status & 1952 DEPEVT_STATUS_TRANSFER_ACTIVE 1953 ? "Transfer Active" 1954 : "Transfer Not Active"); 1955 1956 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1957 if (!ret || ret == -EBUSY) 1958 return; 1959 1960 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1961 dep->name); 1962 } 1963 1964 break; 1965 case DWC3_DEPEVT_STREAMEVT: 1966 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 1967 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1968 dep->name); 1969 return; 1970 } 1971 1972 switch (event->status) { 1973 case DEPEVT_STREAMEVT_FOUND: 1974 dev_vdbg(dwc->dev, "Stream %d found and started\n", 1975 event->parameters); 1976 1977 break; 1978 case DEPEVT_STREAMEVT_NOTFOUND: 1979 /* FALLTHROUGH */ 1980 default: 1981 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 1982 } 1983 break; 1984 case DWC3_DEPEVT_RXTXFIFOEVT: 1985 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 1986 break; 1987 case DWC3_DEPEVT_EPCMDCMPLT: 1988 dev_vdbg(dwc->dev, "Endpoint Command Complete\n"); 1989 break; 1990 } 1991 } 1992 1993 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 1994 { 1995 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 1996 spin_unlock(&dwc->lock); 1997 dwc->gadget_driver->disconnect(&dwc->gadget); 1998 spin_lock(&dwc->lock); 1999 } 2000 } 2001 2002 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum) 2003 { 2004 struct dwc3_ep *dep; 2005 struct dwc3_gadget_ep_cmd_params params; 2006 u32 cmd; 2007 int ret; 2008 2009 dep = dwc->eps[epnum]; 2010 2011 if (!dep->resource_index) 2012 return; 2013 2014 /* 2015 * NOTICE: We are violating what the Databook says about the 2016 * EndTransfer command. 
	 * Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is:
	 *
	 * - Issue EndTransfer WITH CMDIOC bit set
	 * - Wait 100us
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;
	udelay(100);
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. The flag gets set whenever we see an XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS, although it is not entirely
	 * clear why this is needed. Maybe it becomes part of the
	 * power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

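/*
 * The setup_packet_pending flag used by dwc3_gadget_reset_interrupt() above
 * is presumably driven from the EP0 event handling (ep0.c); the snippet
 * below is only a sketch of the bookkeeping the comment there describes:
 *
 *	// on an EP0 XferNotReady event
 *	dwc->setup_packet_pending = true;
 *
 *	// on the matching EP0 XferComplete event
 *	dwc->setup_packet_pending = false;
 */
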
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A)
			&& (speed != DWC3_DCFG_SUPERSPEED)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		/*
		 * TODO: This should be configurable. For now using
		 * maximum allowed HIRD threshold value of 0b1100
		 */
		reg |= DWC3_DCTL_HIRD_THRES(12);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	dwc->gadget_driver->resume(&dwc->gadget);
}

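/*
 * The HIRD threshold programmed in dwc3_gadget_conndone_interrupt() above is
 * currently hard-coded to the maximum value of 12 (0b1100). One possible way
 * to address the TODO and make it configurable would be a module parameter;
 * this is only a sketch, the parameter below does not exist in this driver:
 *
 *	static unsigned int hird_threshold = 12;
 *	module_param(hird_threshold, uint, 0444);
 *	MODULE_PARM_DESC(hird_threshold, "HIRD threshold (0..12)");
 *
 *	...
 *	reg |= DWC3_DCTL_HIRD_THRES(hird_threshold);
 */
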
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 revisions < 2.50a have an issue when configured
	 * without Hibernation mode enabled, which shows up when the device
	 * detects a host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entries
	 * into and exits from low power states before a transfer takes
	 * place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	dwc->link_state = next;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

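/*
 * Recap of the DCTL[12:9] throughput workaround, which is split across this
 * file (sketch only; both halves already exist above): the link-state handler
 * saves and clears the U1/U2 enable bits on a U1/U2 -> U0 transition, and the
 * transfer-completion path near the top of this section restores them once no
 * requests remain queued on any enabled endpoint:
 *
 *	// first half: on U1/U2 -> U0 (dwc3_gadget_linksts_change_interrupt)
 *	dwc->u1u2 = reg & u1u2;
 *	reg &= ~u1u2;
 *	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 *
 *	// second half: once every req_queued list is empty
 *	reg |= dwc->u1u2;
 *	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 *	dwc->u1u2 = 0;
 */
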
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	evt = dwc->ev_buffs[buf];
	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes: a regular entry followed by
		 * 8 bytes of data. At the moment it isn't clear how such
		 * an entry is laid out when it lands near the buffer
		 * boundary, so we'll worry about that once we try to
		 * handle it.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return ret;
}

static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++)
		ret |= dwc3_process_event_buf(dwc, i);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;
	u32 reg;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dwc->lock);

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_check_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	spin_unlock(&dwc->lock);

	return ret;
}

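/*
 * How the two halves above fit together (sketch; the exact call site lives in
 * the UDC start path and the irq number comes from the platform device, both
 * assumptions here): dwc3_interrupt() is the hard-IRQ top half that latches
 * the event count and masks the event buffer interrupt, while
 * dwc3_thread_interrupt() is the threaded bottom half that drains the buffer
 * and unmasks it again. They are registered as a pair, roughly:
 *
 *	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
 *			IRQF_SHARED, "dwc3", dwc);
 *	if (ret)
 *		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
 *				irq, ret);
 */
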
/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success, otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		dev_err(dwc->dev, "failed to allocate setup buffer\n");
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->gadget.ops = &dwc3_gadget_ops;
	dwc->gadget.max_speed = USB_SPEED_SUPER;
	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported = true;
	dwc->gadget.name = "dwc3-gadget";

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err4;
	}

	return 0;

err4:
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

err3:
	kfree(dwc->setup_buf);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}

int dwc3_gadget_prepare(struct dwc3 *dwc)
{
	if (dwc->pullups_connected)
		dwc3_gadget_disable_irq(dwc);

	return 0;
}

void dwc3_gadget_complete(struct dwc3 *dwc)
{
	if (dwc->pullups_connected) {
		dwc3_gadget_enable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true);
	}
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);

	return 0;
}

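/*
 * dwc3_gadget_suspend() above and dwc3_gadget_resume() below are expected to
 * mirror each other: suspend tears down both halves of ep0 and saves DCFG,
 * resume re-enables them, re-arms the SETUP phase and writes DCFG back.
 * They are presumably called from the core's power-management code; the
 * ordering below is an assumption sketched for illustration only:
 *
 *	// system suspend
 *	dwc3_gadget_prepare(dwc);
 *	dwc3_gadget_suspend(dwc);
 *
 *	// system resume
 *	dwc3_gadget_resume(dwc);
 *	dwc3_gadget_complete(dwc);
 */
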
int dwc3_gadget_resume(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret)
		goto err0;

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret)
		goto err1;

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}

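/*
 * Life-cycle note (sketch; the exact call sites live in the core glue and
 * are an assumption here): dwc3_gadget_init() is invoked once from the core
 * probe path when the controller is used in device mode, and
 * dwc3_gadget_exit() is its counterpart on removal, e.g.:
 *
 *	ret = dwc3_gadget_init(dwc);
 *	if (ret)
 *		goto err_gadget;
 *	...
 *	dwc3_gadget_exit(dwc);	// on driver removal
 */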