1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * Taken from Linux Kernel v3.19-rc1 (drivers/usb/dwc3/gadget.c) and ported 10 * to uboot. 11 * 12 * commit 8e74475b0e : usb: dwc3: gadget: use udc-core's reset notifier 13 * 14 * SPDX-License-Identifier: GPL-2.0 15 */ 16 17 #include <common.h> 18 #include <malloc.h> 19 #include <asm/dma-mapping.h> 20 #include <usb/lin_gadget_compat.h> 21 #include <linux/bug.h> 22 #include <linux/list.h> 23 24 #include <linux/usb/ch9.h> 25 #include <linux/usb/gadget.h> 26 27 #include "core.h" 28 #include "gadget.h" 29 #include "io.h" 30 31 #include "linux-compat.h" 32 33 /** 34 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 35 * @dwc: pointer to our context structure 36 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 37 * 38 * Caller should take care of locking. This function will 39 * return 0 on success or -EINVAL if wrong Test Selector 40 * is passed 41 */ 42 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 43 { 44 u32 reg; 45 46 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 47 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 48 49 switch (mode) { 50 case TEST_J: 51 case TEST_K: 52 case TEST_SE0_NAK: 53 case TEST_PACKET: 54 case TEST_FORCE_EN: 55 reg |= mode << 1; 56 break; 57 default: 58 return -EINVAL; 59 } 60 61 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 62 63 return 0; 64 } 65 66 /** 67 * dwc3_gadget_get_link_state - Gets current state of USB Link 68 * @dwc: pointer to our context structure 69 * 70 * Caller should take care of locking. This function will 71 * return the link state on success (>= 0) or -ETIMEDOUT. 72 */ 73 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 74 { 75 u32 reg; 76 77 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 78 79 return DWC3_DSTS_USBLNKST(reg); 80 } 81 82 /** 83 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 84 * @dwc: pointer to our context structure 85 * @state: the state to put link into 86 * 87 * Caller should take care of locking. This function will 88 * return 0 on success or -ETIMEDOUT. 89 */ 90 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 91 { 92 int retries = 10000; 93 u32 reg; 94 95 /* 96 * Wait until device controller is ready. Only applies to 1.94a and 97 * later RTL. 98 */ 99 if (dwc->revision >= DWC3_REVISION_194A) { 100 while (--retries) { 101 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 102 if (reg & DWC3_DSTS_DCNRD) 103 udelay(5); 104 else 105 break; 106 } 107 108 if (retries <= 0) 109 return -ETIMEDOUT; 110 } 111 112 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 113 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 114 115 /* set requested state */ 116 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 117 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 118 119 /* 120 * The following code is racy when called from dwc3_gadget_wakeup, 121 * and is not needed, at least on newer versions 122 */ 123 if (dwc->revision >= DWC3_REVISION_194A) 124 return 0; 125 126 /* wait for a change in DSTS */ 127 retries = 10000; 128 while (--retries) { 129 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 130 131 if (DWC3_DSTS_USBLNKST(reg) == state) 132 return 0; 133 134 udelay(5); 135 } 136 137 dev_vdbg(dwc->dev, "link state change request timed out\n"); 138 139 return -ETIMEDOUT; 140 } 141 142 /** 143 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 144 * @dwc: pointer to our context structure 145 * 146 * This function will a best effort FIFO allocation in order 147 * to improve FIFO usage and throughput, while still allowing 148 * us to enable as many endpoints as possible. 149 * 150 * Keep in mind that this operation will be highly dependent 151 * on the configured size for RAM1 - which contains TxFifo -, 152 * the amount of endpoints enabled on coreConsultant tool, and 153 * the width of the Master Bus. 154 * 155 * In the ideal world, we would always be able to satisfy the 156 * following equation: 157 * 158 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 159 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 160 * 161 * Unfortunately, due to many variables that's not always the case. 162 */ 163 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 164 { 165 int last_fifo_depth = 0; 166 int fifo_size; 167 int mdwidth; 168 int num; 169 170 if (!dwc->needs_fifo_resize) 171 return 0; 172 173 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 174 175 /* MDWIDTH is represented in bits, we need it in bytes */ 176 mdwidth >>= 3; 177 178 /* 179 * FIXME For now we will only allocate 1 wMaxPacketSize space 180 * for each enabled endpoint, later patches will come to 181 * improve this algorithm so that we better use the internal 182 * FIFO space 183 */ 184 for (num = 0; num < dwc->num_in_eps; num++) { 185 /* bit0 indicates direction; 1 means IN ep */ 186 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; 187 int mult = 1; 188 int tmp; 189 190 if (!(dep->flags & DWC3_EP_ENABLED)) 191 continue; 192 193 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 194 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 195 mult = 3; 196 197 /* 198 * REVISIT: the following assumes we will always have enough 199 * space available on the FIFO RAM for all possible use cases. 200 * Make sure that's true somehow and change FIFO allocation 201 * accordingly. 202 * 203 * If we have Bulk or Isochronous endpoints, we want 204 * them to be able to be very, very fast. So we're giving 205 * those endpoints a fifo_size which is enough for 3 full 206 * packets 207 */ 208 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 209 tmp += mdwidth; 210 211 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 212 213 fifo_size |= (last_fifo_depth << 16); 214 215 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n", 216 dep->name, last_fifo_depth, fifo_size & 0xffff); 217 218 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 219 220 last_fifo_depth += (fifo_size & 0xffff); 221 } 222 223 return 0; 224 } 225 226 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 227 int status) 228 { 229 struct dwc3 *dwc = dep->dwc; 230 231 if (req->queued) { 232 dep->busy_slot++; 233 /* 234 * Skip LINK TRB. We can't use req->trb and check for 235 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 236 * just completed (not the LINK TRB). 237 */ 238 if (((dep->busy_slot & DWC3_TRB_MASK) == 239 DWC3_TRB_NUM- 1) && 240 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 241 dep->busy_slot++; 242 req->queued = false; 243 } 244 245 list_del(&req->list); 246 req->trb = NULL; 247 dwc3_flush_cache((uintptr_t)req->request.dma, req->request.length); 248 249 if (req->request.status == -EINPROGRESS) 250 req->request.status = status; 251 252 if (dwc->ep0_bounced && dep->number == 0) 253 dwc->ep0_bounced = false; 254 else 255 usb_gadget_unmap_request(&dwc->gadget, &req->request, 256 req->direction); 257 258 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 259 req, dep->name, req->request.actual, 260 req->request.length, status); 261 262 spin_unlock(&dwc->lock); 263 usb_gadget_giveback_request(&dep->endpoint, &req->request); 264 spin_lock(&dwc->lock); 265 } 266 267 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 268 { 269 u32 timeout = 500; 270 u32 reg; 271 272 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 273 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 274 275 do { 276 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 277 if (!(reg & DWC3_DGCMD_CMDACT)) { 278 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 279 DWC3_DGCMD_STATUS(reg)); 280 return 0; 281 } 282 283 /* 284 * We can't sleep here, because it's also called from 285 * interrupt context. 286 */ 287 timeout--; 288 if (!timeout) 289 return -ETIMEDOUT; 290 udelay(1); 291 } while (1); 292 } 293 294 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 295 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 296 { 297 u32 timeout = 500; 298 u32 reg; 299 300 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 301 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 302 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 303 304 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 305 do { 306 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 307 if (!(reg & DWC3_DEPCMD_CMDACT)) { 308 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 309 DWC3_DEPCMD_STATUS(reg)); 310 return 0; 311 } 312 313 /* 314 * We can't sleep here, because it is also called from 315 * interrupt context. 316 */ 317 timeout--; 318 if (!timeout) 319 return -ETIMEDOUT; 320 321 udelay(1); 322 } while (1); 323 } 324 325 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 326 struct dwc3_trb *trb) 327 { 328 u32 offset = (char *) trb - (char *) dep->trb_pool; 329 330 return dep->trb_pool_dma + offset; 331 } 332 333 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 334 { 335 if (dep->trb_pool) 336 return 0; 337 338 if (dep->number == 0 || dep->number == 1) 339 return 0; 340 341 dep->trb_pool = dma_alloc_coherent(sizeof(struct dwc3_trb) * 342 DWC3_TRB_NUM, 343 (unsigned long *)&dep->trb_pool_dma); 344 if (!dep->trb_pool) { 345 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 346 dep->name); 347 return -ENOMEM; 348 } 349 350 return 0; 351 } 352 353 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 354 { 355 dma_free_coherent(dep->trb_pool); 356 357 dep->trb_pool = NULL; 358 dep->trb_pool_dma = 0; 359 } 360 361 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 362 { 363 struct dwc3_gadget_ep_cmd_params params; 364 u32 cmd; 365 366 memset(¶ms, 0x00, sizeof(params)); 367 368 if (dep->number != 1) { 369 cmd = DWC3_DEPCMD_DEPSTARTCFG; 370 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 371 if (dep->number > 1) { 372 if (dwc->start_config_issued) 373 return 0; 374 dwc->start_config_issued = true; 375 cmd |= DWC3_DEPCMD_PARAM(2); 376 } 377 378 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 379 } 380 381 return 0; 382 } 383 384 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 385 const struct usb_endpoint_descriptor *desc, 386 const struct usb_ss_ep_comp_descriptor *comp_desc, 387 bool ignore, bool restore) 388 { 389 struct dwc3_gadget_ep_cmd_params params; 390 391 memset(¶ms, 0x00, sizeof(params)); 392 393 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 394 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 395 396 /* Burst size is only needed in SuperSpeed mode */ 397 if (dwc->gadget.speed == USB_SPEED_SUPER) { 398 u32 burst = dep->endpoint.maxburst - 1; 399 400 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 401 } 402 403 if (ignore) 404 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 405 406 if (restore) { 407 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 408 params.param2 |= dep->saved_state; 409 } 410 411 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 412 | DWC3_DEPCFG_XFER_NOT_READY_EN; 413 414 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 415 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 416 | DWC3_DEPCFG_STREAM_EVENT_EN; 417 dep->stream_capable = true; 418 } 419 420 if (!usb_endpoint_xfer_control(desc)) 421 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 422 423 /* 424 * We are doing 1:1 mapping for endpoints, meaning 425 * Physical Endpoints 2 maps to Logical Endpoint 2 and 426 * so on. We consider the direction bit as part of the physical 427 * endpoint number. So USB endpoint 0x81 is 0x03. 428 */ 429 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 430 431 /* 432 * We must use the lower 16 TX FIFOs even though 433 * HW might have more 434 */ 435 if (dep->direction) 436 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 437 438 if (desc->bInterval) { 439 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 440 dep->interval = 1 << (desc->bInterval - 1); 441 } 442 443 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 444 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 445 } 446 447 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 448 { 449 struct dwc3_gadget_ep_cmd_params params; 450 451 memset(¶ms, 0x00, sizeof(params)); 452 453 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 454 455 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 456 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 457 } 458 459 /** 460 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 461 * @dep: endpoint to be initialized 462 * @desc: USB Endpoint Descriptor 463 * 464 * Caller should take care of locking 465 */ 466 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 467 const struct usb_endpoint_descriptor *desc, 468 const struct usb_ss_ep_comp_descriptor *comp_desc, 469 bool ignore, bool restore) 470 { 471 struct dwc3 *dwc = dep->dwc; 472 u32 reg; 473 int ret; 474 475 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name); 476 477 if (!(dep->flags & DWC3_EP_ENABLED)) { 478 ret = dwc3_gadget_start_config(dwc, dep); 479 if (ret) 480 return ret; 481 } 482 483 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 484 restore); 485 if (ret) 486 return ret; 487 488 if (!(dep->flags & DWC3_EP_ENABLED)) { 489 struct dwc3_trb *trb_st_hw; 490 struct dwc3_trb *trb_link; 491 492 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 493 if (ret) 494 return ret; 495 496 dep->endpoint.desc = desc; 497 dep->comp_desc = comp_desc; 498 dep->type = usb_endpoint_type(desc); 499 dep->flags |= DWC3_EP_ENABLED; 500 501 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 502 reg |= DWC3_DALEPENA_EP(dep->number); 503 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 504 505 if (!usb_endpoint_xfer_isoc(desc)) 506 return 0; 507 508 /* Link TRB for ISOC. The HWO bit is never reset */ 509 trb_st_hw = &dep->trb_pool[0]; 510 511 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 512 memset(trb_link, 0, sizeof(*trb_link)); 513 514 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 515 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 516 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 517 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 518 } 519 520 return 0; 521 } 522 523 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 524 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 525 { 526 struct dwc3_request *req; 527 528 if (!list_empty(&dep->req_queued)) { 529 dwc3_stop_active_transfer(dwc, dep->number, true); 530 531 /* - giveback all requests to gadget driver */ 532 while (!list_empty(&dep->req_queued)) { 533 req = next_request(&dep->req_queued); 534 535 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 536 } 537 } 538 539 while (!list_empty(&dep->request_list)) { 540 req = next_request(&dep->request_list); 541 542 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 543 } 544 } 545 546 /** 547 * __dwc3_gadget_ep_disable - Disables a HW endpoint 548 * @dep: the endpoint to disable 549 * 550 * This function also removes requests which are currently processed ny the 551 * hardware and those which are not yet scheduled. 552 * Caller should take care of locking. 553 */ 554 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 555 { 556 struct dwc3 *dwc = dep->dwc; 557 u32 reg; 558 559 dwc3_remove_requests(dwc, dep); 560 561 /* make sure HW endpoint isn't stalled */ 562 if (dep->flags & DWC3_EP_STALL) 563 __dwc3_gadget_ep_set_halt(dep, 0, false); 564 565 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 566 reg &= ~DWC3_DALEPENA_EP(dep->number); 567 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 568 569 dep->stream_capable = false; 570 dep->endpoint.desc = NULL; 571 dep->comp_desc = NULL; 572 dep->type = 0; 573 dep->flags = 0; 574 575 return 0; 576 } 577 578 /* -------------------------------------------------------------------------- */ 579 580 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 581 const struct usb_endpoint_descriptor *desc) 582 { 583 return -EINVAL; 584 } 585 586 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 587 { 588 return -EINVAL; 589 } 590 591 /* -------------------------------------------------------------------------- */ 592 593 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 594 const struct usb_endpoint_descriptor *desc) 595 { 596 struct dwc3_ep *dep; 597 unsigned long flags; 598 int ret; 599 600 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 601 pr_debug("dwc3: invalid parameters\n"); 602 return -EINVAL; 603 } 604 605 if (!desc->wMaxPacketSize) { 606 pr_debug("dwc3: missing wMaxPacketSize\n"); 607 return -EINVAL; 608 } 609 610 dep = to_dwc3_ep(ep); 611 612 if (dep->flags & DWC3_EP_ENABLED) { 613 WARN(true, "%s is already enabled\n", 614 dep->name); 615 return 0; 616 } 617 618 switch (usb_endpoint_type(desc)) { 619 case USB_ENDPOINT_XFER_CONTROL: 620 strlcat(dep->name, "-control", sizeof(dep->name)); 621 break; 622 case USB_ENDPOINT_XFER_ISOC: 623 strlcat(dep->name, "-isoc", sizeof(dep->name)); 624 break; 625 case USB_ENDPOINT_XFER_BULK: 626 strlcat(dep->name, "-bulk", sizeof(dep->name)); 627 break; 628 case USB_ENDPOINT_XFER_INT: 629 strlcat(dep->name, "-int", sizeof(dep->name)); 630 break; 631 default: 632 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 633 } 634 635 spin_lock_irqsave(&dwc->lock, flags); 636 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 637 spin_unlock_irqrestore(&dwc->lock, flags); 638 639 return ret; 640 } 641 642 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 643 { 644 struct dwc3_ep *dep; 645 unsigned long flags; 646 int ret; 647 648 if (!ep) { 649 pr_debug("dwc3: invalid parameters\n"); 650 return -EINVAL; 651 } 652 653 dep = to_dwc3_ep(ep); 654 655 if (!(dep->flags & DWC3_EP_ENABLED)) { 656 WARN(true, "%s is already disabled\n", 657 dep->name); 658 return 0; 659 } 660 661 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 662 dep->number >> 1, 663 (dep->number & 1) ? "in" : "out"); 664 665 spin_lock_irqsave(&dwc->lock, flags); 666 ret = __dwc3_gadget_ep_disable(dep); 667 spin_unlock_irqrestore(&dwc->lock, flags); 668 669 return ret; 670 } 671 672 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 673 gfp_t gfp_flags) 674 { 675 struct dwc3_request *req; 676 struct dwc3_ep *dep = to_dwc3_ep(ep); 677 678 req = kzalloc(sizeof(*req), gfp_flags); 679 if (!req) 680 return NULL; 681 682 req->epnum = dep->number; 683 req->dep = dep; 684 685 return &req->request; 686 } 687 688 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 689 struct usb_request *request) 690 { 691 struct dwc3_request *req = to_dwc3_request(request); 692 693 kfree(req); 694 } 695 696 /** 697 * dwc3_prepare_one_trb - setup one TRB from one request 698 * @dep: endpoint for which this request is prepared 699 * @req: dwc3_request pointer 700 */ 701 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 702 struct dwc3_request *req, dma_addr_t dma, 703 unsigned length, unsigned last, unsigned chain, unsigned node) 704 { 705 struct dwc3_trb *trb; 706 707 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", 708 dep->name, req, (unsigned long long) dma, 709 length, last ? " last" : "", 710 chain ? " chain" : ""); 711 712 713 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 714 715 if (!req->trb) { 716 dwc3_gadget_move_request_queued(req); 717 req->trb = trb; 718 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 719 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 720 } 721 722 dep->free_slot++; 723 /* Skip the LINK-TRB on ISOC */ 724 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 725 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 726 dep->free_slot++; 727 728 trb->size = DWC3_TRB_SIZE_LENGTH(length); 729 trb->bpl = lower_32_bits(dma); 730 trb->bph = upper_32_bits(dma); 731 732 switch (usb_endpoint_type(dep->endpoint.desc)) { 733 case USB_ENDPOINT_XFER_CONTROL: 734 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 735 break; 736 737 case USB_ENDPOINT_XFER_ISOC: 738 if (!node) 739 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 740 else 741 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 742 break; 743 744 case USB_ENDPOINT_XFER_BULK: 745 case USB_ENDPOINT_XFER_INT: 746 trb->ctrl = DWC3_TRBCTL_NORMAL; 747 break; 748 default: 749 /* 750 * This is only possible with faulty memory because we 751 * checked it already :) 752 */ 753 BUG(); 754 } 755 756 if (!req->request.no_interrupt && !chain) 757 trb->ctrl |= DWC3_TRB_CTRL_IOC; 758 759 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 760 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 761 trb->ctrl |= DWC3_TRB_CTRL_CSP; 762 } else if (last) { 763 trb->ctrl |= DWC3_TRB_CTRL_LST; 764 } 765 766 if (chain) 767 trb->ctrl |= DWC3_TRB_CTRL_CHN; 768 769 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 770 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 771 772 trb->ctrl |= DWC3_TRB_CTRL_HWO; 773 774 dwc3_flush_cache((uintptr_t)dma, length); 775 dwc3_flush_cache((uintptr_t)trb, sizeof(*trb)); 776 } 777 778 /* 779 * dwc3_prepare_trbs - setup TRBs from requests 780 * @dep: endpoint for which requests are being prepared 781 * @starting: true if the endpoint is idle and no requests are queued. 782 * 783 * The function goes through the requests list and sets up TRBs for the 784 * transfers. The function returns once there are no more TRBs available or 785 * it runs out of requests. 786 */ 787 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 788 { 789 struct dwc3_request *req, *n; 790 u32 trbs_left; 791 u32 max; 792 793 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 794 795 /* the first request must not be queued */ 796 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 797 798 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 799 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 800 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 801 if (trbs_left > max) 802 trbs_left = max; 803 } 804 805 /* 806 * If busy & slot are equal than it is either full or empty. If we are 807 * starting to process requests then we are empty. Otherwise we are 808 * full and don't do anything 809 */ 810 if (!trbs_left) { 811 if (!starting) 812 return; 813 trbs_left = DWC3_TRB_NUM; 814 /* 815 * In case we start from scratch, we queue the ISOC requests 816 * starting from slot 1. This is done because we use ring 817 * buffer and have no LST bit to stop us. Instead, we place 818 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 819 * after the first request so we start at slot 1 and have 820 * 7 requests proceed before we hit the first IOC. 821 * Other transfer types don't use the ring buffer and are 822 * processed from the first TRB until the last one. Since we 823 * don't wrap around we have to start at the beginning. 824 */ 825 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 826 dep->busy_slot = 1; 827 dep->free_slot = 1; 828 } else { 829 dep->busy_slot = 0; 830 dep->free_slot = 0; 831 } 832 } 833 834 /* The last TRB is a link TRB, not used for xfer */ 835 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 836 return; 837 838 list_for_each_entry_safe(req, n, &dep->request_list, list) { 839 unsigned length; 840 dma_addr_t dma; 841 842 dma = req->request.dma; 843 length = req->request.length; 844 845 dwc3_prepare_one_trb(dep, req, dma, length, 846 true, false, 0); 847 848 break; 849 } 850 } 851 852 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 853 int start_new) 854 { 855 struct dwc3_gadget_ep_cmd_params params; 856 struct dwc3_request *req; 857 struct dwc3 *dwc = dep->dwc; 858 int ret; 859 u32 cmd; 860 861 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 862 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); 863 return -EBUSY; 864 } 865 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 866 867 /* 868 * If we are getting here after a short-out-packet we don't enqueue any 869 * new requests as we try to set the IOC bit only on the last request. 870 */ 871 if (start_new) { 872 if (list_empty(&dep->req_queued)) 873 dwc3_prepare_trbs(dep, start_new); 874 875 /* req points to the first request which will be sent */ 876 req = next_request(&dep->req_queued); 877 } else { 878 dwc3_prepare_trbs(dep, start_new); 879 880 /* 881 * req points to the first request where HWO changed from 0 to 1 882 */ 883 req = next_request(&dep->req_queued); 884 } 885 if (!req) { 886 dep->flags |= DWC3_EP_PENDING_REQUEST; 887 return 0; 888 } 889 890 memset(¶ms, 0, sizeof(params)); 891 892 if (start_new) { 893 params.param0 = upper_32_bits(req->trb_dma); 894 params.param1 = lower_32_bits(req->trb_dma); 895 cmd = DWC3_DEPCMD_STARTTRANSFER; 896 } else { 897 cmd = DWC3_DEPCMD_UPDATETRANSFER; 898 } 899 900 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 901 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 902 if (ret < 0) { 903 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 904 905 /* 906 * FIXME we need to iterate over the list of requests 907 * here and stop, unmap, free and del each of the linked 908 * requests instead of what we do now. 909 */ 910 usb_gadget_unmap_request(&dwc->gadget, &req->request, 911 req->direction); 912 list_del(&req->list); 913 return ret; 914 } 915 916 dep->flags |= DWC3_EP_BUSY; 917 918 if (start_new) { 919 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 920 dep->number); 921 WARN_ON_ONCE(!dep->resource_index); 922 } 923 924 return 0; 925 } 926 927 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 928 struct dwc3_ep *dep, u32 cur_uf) 929 { 930 u32 uf; 931 932 if (list_empty(&dep->request_list)) { 933 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", 934 dep->name); 935 dep->flags |= DWC3_EP_PENDING_REQUEST; 936 return; 937 } 938 939 /* 4 micro frames in the future */ 940 uf = cur_uf + dep->interval * 4; 941 942 __dwc3_gadget_kick_transfer(dep, uf, 1); 943 } 944 945 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 946 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 947 { 948 u32 cur_uf, mask; 949 950 mask = ~(dep->interval - 1); 951 cur_uf = event->parameters & mask; 952 953 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 954 } 955 956 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 957 { 958 struct dwc3 *dwc = dep->dwc; 959 int ret; 960 961 req->request.actual = 0; 962 req->request.status = -EINPROGRESS; 963 req->direction = dep->direction; 964 req->epnum = dep->number; 965 966 /* 967 * DWC3 hangs on OUT requests smaller than maxpacket size, 968 * so HACK the request length 969 */ 970 if (dep->direction == 0 && 971 req->request.length < dep->endpoint.maxpacket) 972 req->request.length = dep->endpoint.maxpacket; 973 974 /* 975 * We only add to our list of requests now and 976 * start consuming the list once we get XferNotReady 977 * IRQ. 978 * 979 * That way, we avoid doing anything that we don't need 980 * to do now and defer it until the point we receive a 981 * particular token from the Host side. 982 * 983 * This will also avoid Host cancelling URBs due to too 984 * many NAKs. 985 */ 986 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 987 dep->direction); 988 if (ret) 989 return ret; 990 991 list_add_tail(&req->list, &dep->request_list); 992 993 /* 994 * There are a few special cases: 995 * 996 * 1. XferNotReady with empty list of requests. We need to kick the 997 * transfer here in that situation, otherwise we will be NAKing 998 * forever. If we get XferNotReady before gadget driver has a 999 * chance to queue a request, we will ACK the IRQ but won't be 1000 * able to receive the data until the next request is queued. 1001 * The following code is handling exactly that. 1002 * 1003 */ 1004 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1005 /* 1006 * If xfernotready is already elapsed and it is a case 1007 * of isoc transfer, then issue END TRANSFER, so that 1008 * you can receive xfernotready again and can have 1009 * notion of current microframe. 1010 */ 1011 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1012 if (list_empty(&dep->req_queued)) { 1013 dwc3_stop_active_transfer(dwc, dep->number, true); 1014 dep->flags = DWC3_EP_ENABLED; 1015 } 1016 return 0; 1017 } 1018 1019 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1020 if (ret && ret != -EBUSY) 1021 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1022 dep->name); 1023 return ret; 1024 } 1025 1026 /* 1027 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1028 * kick the transfer here after queuing a request, otherwise the 1029 * core may not see the modified TRB(s). 1030 */ 1031 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1032 (dep->flags & DWC3_EP_BUSY) && 1033 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1034 WARN_ON_ONCE(!dep->resource_index); 1035 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1036 false); 1037 if (ret && ret != -EBUSY) 1038 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1039 dep->name); 1040 return ret; 1041 } 1042 1043 /* 1044 * 4. Stream Capable Bulk Endpoints. We need to start the transfer 1045 * right away, otherwise host will not know we have streams to be 1046 * handled. 1047 */ 1048 if (dep->stream_capable) { 1049 int ret; 1050 1051 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1052 if (ret && ret != -EBUSY) { 1053 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1054 dep->name); 1055 } 1056 } 1057 1058 return 0; 1059 } 1060 1061 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1062 gfp_t gfp_flags) 1063 { 1064 struct dwc3_request *req = to_dwc3_request(request); 1065 struct dwc3_ep *dep = to_dwc3_ep(ep); 1066 1067 unsigned long flags; 1068 1069 int ret; 1070 1071 spin_lock_irqsave(&dwc->lock, flags); 1072 if (!dep->endpoint.desc) { 1073 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 1074 request, ep->name); 1075 ret = -ESHUTDOWN; 1076 goto out; 1077 } 1078 1079 if (req->dep != dep) { 1080 WARN(true, "request %p belongs to '%s'\n", 1081 request, req->dep->name); 1082 ret = -EINVAL; 1083 goto out; 1084 } 1085 1086 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", 1087 request, ep->name, request->length); 1088 1089 ret = __dwc3_gadget_ep_queue(dep, req); 1090 1091 out: 1092 spin_unlock_irqrestore(&dwc->lock, flags); 1093 1094 return ret; 1095 } 1096 1097 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1098 struct usb_request *request) 1099 { 1100 struct dwc3_request *req = to_dwc3_request(request); 1101 struct dwc3_request *r = NULL; 1102 1103 struct dwc3_ep *dep = to_dwc3_ep(ep); 1104 struct dwc3 *dwc = dep->dwc; 1105 1106 unsigned long flags; 1107 int ret = 0; 1108 1109 spin_lock_irqsave(&dwc->lock, flags); 1110 1111 list_for_each_entry(r, &dep->request_list, list) { 1112 if (r == req) 1113 break; 1114 } 1115 1116 if (r != req) { 1117 list_for_each_entry(r, &dep->req_queued, list) { 1118 if (r == req) 1119 break; 1120 } 1121 if (r == req) { 1122 /* wait until it is processed */ 1123 dwc3_stop_active_transfer(dwc, dep->number, true); 1124 goto out1; 1125 } 1126 dev_err(dwc->dev, "request %p was not queued to %s\n", 1127 request, ep->name); 1128 ret = -EINVAL; 1129 goto out0; 1130 } 1131 1132 out1: 1133 /* giveback the request */ 1134 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1135 1136 out0: 1137 spin_unlock_irqrestore(&dwc->lock, flags); 1138 1139 return ret; 1140 } 1141 1142 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1143 { 1144 struct dwc3_gadget_ep_cmd_params params; 1145 struct dwc3 *dwc = dep->dwc; 1146 int ret; 1147 1148 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1149 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1150 return -EINVAL; 1151 } 1152 1153 memset(¶ms, 0x00, sizeof(params)); 1154 1155 if (value) { 1156 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1157 (!list_empty(&dep->req_queued) || 1158 !list_empty(&dep->request_list)))) { 1159 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", 1160 dep->name); 1161 return -EAGAIN; 1162 } 1163 1164 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1165 DWC3_DEPCMD_SETSTALL, ¶ms); 1166 if (ret) 1167 dev_err(dwc->dev, "failed to set STALL on %s\n", 1168 dep->name); 1169 else 1170 dep->flags |= DWC3_EP_STALL; 1171 } else { 1172 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1173 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1174 if (ret) 1175 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1176 dep->name); 1177 else 1178 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1179 } 1180 1181 return ret; 1182 } 1183 1184 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1185 { 1186 struct dwc3_ep *dep = to_dwc3_ep(ep); 1187 1188 unsigned long flags; 1189 1190 int ret; 1191 1192 spin_lock_irqsave(&dwc->lock, flags); 1193 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1194 spin_unlock_irqrestore(&dwc->lock, flags); 1195 1196 return ret; 1197 } 1198 1199 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1200 { 1201 struct dwc3_ep *dep = to_dwc3_ep(ep); 1202 unsigned long flags; 1203 int ret; 1204 1205 spin_lock_irqsave(&dwc->lock, flags); 1206 dep->flags |= DWC3_EP_WEDGE; 1207 1208 if (dep->number == 0 || dep->number == 1) 1209 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1210 else 1211 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1212 spin_unlock_irqrestore(&dwc->lock, flags); 1213 1214 return ret; 1215 } 1216 1217 /* -------------------------------------------------------------------------- */ 1218 1219 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1220 .bLength = USB_DT_ENDPOINT_SIZE, 1221 .bDescriptorType = USB_DT_ENDPOINT, 1222 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1223 }; 1224 1225 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1226 .enable = dwc3_gadget_ep0_enable, 1227 .disable = dwc3_gadget_ep0_disable, 1228 .alloc_request = dwc3_gadget_ep_alloc_request, 1229 .free_request = dwc3_gadget_ep_free_request, 1230 .queue = dwc3_gadget_ep0_queue, 1231 .dequeue = dwc3_gadget_ep_dequeue, 1232 .set_halt = dwc3_gadget_ep0_set_halt, 1233 .set_wedge = dwc3_gadget_ep_set_wedge, 1234 }; 1235 1236 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1237 .enable = dwc3_gadget_ep_enable, 1238 .disable = dwc3_gadget_ep_disable, 1239 .alloc_request = dwc3_gadget_ep_alloc_request, 1240 .free_request = dwc3_gadget_ep_free_request, 1241 .queue = dwc3_gadget_ep_queue, 1242 .dequeue = dwc3_gadget_ep_dequeue, 1243 .set_halt = dwc3_gadget_ep_set_halt, 1244 .set_wedge = dwc3_gadget_ep_set_wedge, 1245 }; 1246 1247 /* -------------------------------------------------------------------------- */ 1248 1249 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1250 { 1251 struct dwc3 *dwc = gadget_to_dwc(g); 1252 u32 reg; 1253 1254 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1255 return DWC3_DSTS_SOFFN(reg); 1256 } 1257 1258 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1259 { 1260 struct dwc3 *dwc = gadget_to_dwc(g); 1261 1262 unsigned long timeout; 1263 unsigned long flags; 1264 1265 u32 reg; 1266 1267 int ret = 0; 1268 1269 u8 link_state; 1270 u8 speed; 1271 1272 spin_lock_irqsave(&dwc->lock, flags); 1273 1274 /* 1275 * According to the Databook Remote wakeup request should 1276 * be issued only when the device is in early suspend state. 1277 * 1278 * We can check that via USB Link State bits in DSTS register. 1279 */ 1280 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1281 1282 speed = reg & DWC3_DSTS_CONNECTSPD; 1283 if (speed == DWC3_DSTS_SUPERSPEED) { 1284 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1285 ret = -EINVAL; 1286 goto out; 1287 } 1288 1289 link_state = DWC3_DSTS_USBLNKST(reg); 1290 1291 switch (link_state) { 1292 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1293 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1294 break; 1295 default: 1296 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1297 link_state); 1298 ret = -EINVAL; 1299 goto out; 1300 } 1301 1302 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1303 if (ret < 0) { 1304 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1305 goto out; 1306 } 1307 1308 /* Recent versions do this automatically */ 1309 if (dwc->revision < DWC3_REVISION_194A) { 1310 /* write zeroes to Link Change Request */ 1311 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1312 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1313 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1314 } 1315 1316 /* poll until Link State changes to ON */ 1317 timeout = 1000; 1318 1319 while (timeout--) { 1320 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1321 1322 /* in HS, means ON */ 1323 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1324 break; 1325 } 1326 1327 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1328 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1329 ret = -EINVAL; 1330 } 1331 1332 out: 1333 spin_unlock_irqrestore(&dwc->lock, flags); 1334 1335 return ret; 1336 } 1337 1338 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1339 int is_selfpowered) 1340 { 1341 struct dwc3 *dwc = gadget_to_dwc(g); 1342 unsigned long flags; 1343 1344 spin_lock_irqsave(&dwc->lock, flags); 1345 dwc->is_selfpowered = !!is_selfpowered; 1346 spin_unlock_irqrestore(&dwc->lock, flags); 1347 1348 return 0; 1349 } 1350 1351 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1352 { 1353 u32 reg; 1354 u32 timeout = 500; 1355 1356 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1357 if (is_on) { 1358 if (dwc->revision <= DWC3_REVISION_187A) { 1359 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1360 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1361 } 1362 1363 if (dwc->revision >= DWC3_REVISION_194A) 1364 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1365 reg |= DWC3_DCTL_RUN_STOP; 1366 1367 if (dwc->has_hibernation) 1368 reg |= DWC3_DCTL_KEEP_CONNECT; 1369 1370 dwc->pullups_connected = true; 1371 } else { 1372 reg &= ~DWC3_DCTL_RUN_STOP; 1373 1374 if (dwc->has_hibernation && !suspend) 1375 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1376 1377 dwc->pullups_connected = false; 1378 } 1379 1380 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1381 1382 do { 1383 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1384 if (is_on) { 1385 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1386 break; 1387 } else { 1388 if (reg & DWC3_DSTS_DEVCTRLHLT) 1389 break; 1390 } 1391 timeout--; 1392 if (!timeout) 1393 return -ETIMEDOUT; 1394 udelay(1); 1395 } while (1); 1396 1397 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", 1398 dwc->gadget_driver 1399 ? dwc->gadget_driver->function : "no-function", 1400 is_on ? "connect" : "disconnect"); 1401 1402 return 0; 1403 } 1404 1405 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1406 { 1407 struct dwc3 *dwc = gadget_to_dwc(g); 1408 unsigned long flags; 1409 int ret; 1410 1411 is_on = !!is_on; 1412 1413 spin_lock_irqsave(&dwc->lock, flags); 1414 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1415 spin_unlock_irqrestore(&dwc->lock, flags); 1416 1417 return ret; 1418 } 1419 1420 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1421 { 1422 u32 reg; 1423 1424 /* Enable all but Start and End of Frame IRQs */ 1425 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1426 DWC3_DEVTEN_EVNTOVERFLOWEN | 1427 DWC3_DEVTEN_CMDCMPLTEN | 1428 DWC3_DEVTEN_ERRTICERREN | 1429 DWC3_DEVTEN_WKUPEVTEN | 1430 DWC3_DEVTEN_ULSTCNGEN | 1431 DWC3_DEVTEN_CONNECTDONEEN | 1432 DWC3_DEVTEN_USBRSTEN | 1433 DWC3_DEVTEN_DISCONNEVTEN); 1434 1435 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1436 } 1437 1438 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1439 { 1440 /* mask all interrupts */ 1441 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1442 } 1443 1444 static int dwc3_gadget_start(struct usb_gadget *g, 1445 struct usb_gadget_driver *driver) 1446 { 1447 struct dwc3 *dwc = gadget_to_dwc(g); 1448 struct dwc3_ep *dep; 1449 unsigned long flags; 1450 int ret = 0; 1451 u32 reg; 1452 1453 spin_lock_irqsave(&dwc->lock, flags); 1454 1455 if (dwc->gadget_driver) { 1456 dev_err(dwc->dev, "%s is already bound to %s\n", 1457 dwc->gadget.name, 1458 dwc->gadget_driver->function); 1459 ret = -EBUSY; 1460 goto err1; 1461 } 1462 1463 dwc->gadget_driver = driver; 1464 1465 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1466 reg &= ~(DWC3_DCFG_SPEED_MASK); 1467 1468 /** 1469 * WORKAROUND: DWC3 revision < 2.20a have an issue 1470 * which would cause metastability state on Run/Stop 1471 * bit if we try to force the IP to USB2-only mode. 1472 * 1473 * Because of that, we cannot configure the IP to any 1474 * speed other than the SuperSpeed 1475 * 1476 * Refers to: 1477 * 1478 * STAR#9000525659: Clock Domain Crossing on DCTL in 1479 * USB 2.0 Mode 1480 */ 1481 if (dwc->revision < DWC3_REVISION_220A) { 1482 reg |= DWC3_DCFG_SUPERSPEED; 1483 } else { 1484 switch (dwc->maximum_speed) { 1485 case USB_SPEED_LOW: 1486 reg |= DWC3_DSTS_LOWSPEED; 1487 break; 1488 case USB_SPEED_FULL: 1489 reg |= DWC3_DSTS_FULLSPEED1; 1490 break; 1491 case USB_SPEED_HIGH: 1492 reg |= DWC3_DSTS_HIGHSPEED; 1493 break; 1494 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1495 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1496 default: 1497 reg |= DWC3_DSTS_SUPERSPEED; 1498 } 1499 } 1500 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1501 1502 dwc->start_config_issued = false; 1503 1504 /* Start with SuperSpeed Default */ 1505 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1506 1507 dep = dwc->eps[0]; 1508 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1509 false); 1510 if (ret) { 1511 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1512 goto err2; 1513 } 1514 1515 dep = dwc->eps[1]; 1516 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1517 false); 1518 if (ret) { 1519 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1520 goto err3; 1521 } 1522 1523 /* begin to receive SETUP packets */ 1524 dwc->ep0state = EP0_SETUP_PHASE; 1525 dwc3_ep0_out_start(dwc); 1526 1527 dwc3_gadget_enable_irq(dwc); 1528 1529 spin_unlock_irqrestore(&dwc->lock, flags); 1530 1531 return 0; 1532 1533 err3: 1534 __dwc3_gadget_ep_disable(dwc->eps[0]); 1535 1536 err2: 1537 dwc->gadget_driver = NULL; 1538 1539 err1: 1540 spin_unlock_irqrestore(&dwc->lock, flags); 1541 1542 return ret; 1543 } 1544 1545 static int dwc3_gadget_stop(struct usb_gadget *g) 1546 { 1547 struct dwc3 *dwc = gadget_to_dwc(g); 1548 unsigned long flags; 1549 1550 spin_lock_irqsave(&dwc->lock, flags); 1551 1552 dwc3_gadget_disable_irq(dwc); 1553 __dwc3_gadget_ep_disable(dwc->eps[0]); 1554 __dwc3_gadget_ep_disable(dwc->eps[1]); 1555 1556 dwc->gadget_driver = NULL; 1557 1558 spin_unlock_irqrestore(&dwc->lock, flags); 1559 1560 return 0; 1561 } 1562 1563 static const struct usb_gadget_ops dwc3_gadget_ops = { 1564 .get_frame = dwc3_gadget_get_frame, 1565 .wakeup = dwc3_gadget_wakeup, 1566 .set_selfpowered = dwc3_gadget_set_selfpowered, 1567 .pullup = dwc3_gadget_pullup, 1568 .udc_start = dwc3_gadget_start, 1569 .udc_stop = dwc3_gadget_stop, 1570 }; 1571 1572 /* -------------------------------------------------------------------------- */ 1573 1574 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1575 u8 num, u32 direction) 1576 { 1577 struct dwc3_ep *dep; 1578 u8 i; 1579 1580 for (i = 0; i < num; i++) { 1581 u8 epnum = (i << 1) | (!!direction); 1582 1583 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1584 if (!dep) 1585 return -ENOMEM; 1586 1587 dep->dwc = dwc; 1588 dep->number = epnum; 1589 dep->direction = !!direction; 1590 dwc->eps[epnum] = dep; 1591 1592 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1593 (epnum & 1) ? "in" : "out"); 1594 1595 dep->endpoint.name = dep->name; 1596 1597 dev_vdbg(dwc->dev, "initializing %s\n", dep->name); 1598 1599 if (epnum == 0 || epnum == 1) { 1600 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1601 dep->endpoint.maxburst = 1; 1602 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1603 if (!epnum) 1604 dwc->gadget.ep0 = &dep->endpoint; 1605 } else { 1606 int ret; 1607 1608 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1609 dep->endpoint.max_streams = 15; 1610 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1611 list_add_tail(&dep->endpoint.ep_list, 1612 &dwc->gadget.ep_list); 1613 1614 ret = dwc3_alloc_trb_pool(dep); 1615 if (ret) 1616 return ret; 1617 } 1618 1619 INIT_LIST_HEAD(&dep->request_list); 1620 INIT_LIST_HEAD(&dep->req_queued); 1621 } 1622 1623 return 0; 1624 } 1625 1626 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1627 { 1628 int ret; 1629 1630 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1631 1632 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1633 if (ret < 0) { 1634 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n"); 1635 return ret; 1636 } 1637 1638 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1639 if (ret < 0) { 1640 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n"); 1641 return ret; 1642 } 1643 1644 return 0; 1645 } 1646 1647 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1648 { 1649 struct dwc3_ep *dep; 1650 u8 epnum; 1651 1652 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1653 dep = dwc->eps[epnum]; 1654 if (!dep) 1655 continue; 1656 /* 1657 * Physical endpoints 0 and 1 are special; they form the 1658 * bi-directional USB endpoint 0. 1659 * 1660 * For those two physical endpoints, we don't allocate a TRB 1661 * pool nor do we add them the endpoints list. Due to that, we 1662 * shouldn't do these two operations otherwise we would end up 1663 * with all sorts of bugs when removing dwc3.ko. 1664 */ 1665 if (epnum != 0 && epnum != 1) { 1666 dwc3_free_trb_pool(dep); 1667 list_del(&dep->endpoint.ep_list); 1668 } 1669 1670 kfree(dep); 1671 } 1672 } 1673 1674 /* -------------------------------------------------------------------------- */ 1675 1676 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1677 struct dwc3_request *req, struct dwc3_trb *trb, 1678 const struct dwc3_event_depevt *event, int status) 1679 { 1680 unsigned int count; 1681 unsigned int s_pkt = 0; 1682 unsigned int trb_status; 1683 1684 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1685 /* 1686 * We continue despite the error. There is not much we 1687 * can do. If we don't clean it up we loop forever. If 1688 * we skip the TRB then it gets overwritten after a 1689 * while since we use them in a ring buffer. A BUG() 1690 * would help. Lets hope that if this occurs, someone 1691 * fixes the root cause instead of looking away :) 1692 */ 1693 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1694 dep->name, trb); 1695 count = trb->size & DWC3_TRB_SIZE_MASK; 1696 1697 if (dep->direction) { 1698 if (count) { 1699 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1700 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1701 dev_dbg(dwc->dev, "incomplete IN transfer %s\n", 1702 dep->name); 1703 /* 1704 * If missed isoc occurred and there is 1705 * no request queued then issue END 1706 * TRANSFER, so that core generates 1707 * next xfernotready and we will issue 1708 * a fresh START TRANSFER. 1709 * If there are still queued request 1710 * then wait, do not issue either END 1711 * or UPDATE TRANSFER, just attach next 1712 * request in request_list during 1713 * giveback.If any future queued request 1714 * is successfully transferred then we 1715 * will issue UPDATE TRANSFER for all 1716 * request in the request_list. 1717 */ 1718 dep->flags |= DWC3_EP_MISSED_ISOC; 1719 } else { 1720 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1721 dep->name); 1722 status = -ECONNRESET; 1723 } 1724 } else { 1725 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1726 } 1727 } else { 1728 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1729 s_pkt = 1; 1730 } 1731 1732 /* 1733 * We assume here we will always receive the entire data block 1734 * which we should receive. Meaning, if we program RX to 1735 * receive 4K but we receive only 2K, we assume that's all we 1736 * should receive and we simply bounce the request back to the 1737 * gadget driver for further processing. 1738 */ 1739 req->request.actual += req->request.length - count; 1740 if (s_pkt) 1741 return 1; 1742 if ((event->status & DEPEVT_STATUS_LST) && 1743 (trb->ctrl & (DWC3_TRB_CTRL_LST | 1744 DWC3_TRB_CTRL_HWO))) 1745 return 1; 1746 if ((event->status & DEPEVT_STATUS_IOC) && 1747 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1748 return 1; 1749 return 0; 1750 } 1751 1752 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1753 const struct dwc3_event_depevt *event, int status) 1754 { 1755 struct dwc3_request *req; 1756 struct dwc3_trb *trb; 1757 unsigned int slot; 1758 1759 req = next_request(&dep->req_queued); 1760 if (!req) { 1761 WARN_ON_ONCE(1); 1762 return 1; 1763 } 1764 1765 slot = req->start_slot; 1766 if ((slot == DWC3_TRB_NUM - 1) && 1767 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1768 slot++; 1769 slot %= DWC3_TRB_NUM; 1770 trb = &dep->trb_pool[slot]; 1771 1772 dwc3_flush_cache((uintptr_t)trb, sizeof(*trb)); 1773 __dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status); 1774 dwc3_gadget_giveback(dep, req, status); 1775 1776 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1777 list_empty(&dep->req_queued)) { 1778 if (list_empty(&dep->request_list)) { 1779 /* 1780 * If there is no entry in request list then do 1781 * not issue END TRANSFER now. Just set PENDING 1782 * flag, so that END TRANSFER is issued when an 1783 * entry is added into request list. 1784 */ 1785 dep->flags = DWC3_EP_PENDING_REQUEST; 1786 } else { 1787 dwc3_stop_active_transfer(dwc, dep->number, true); 1788 dep->flags = DWC3_EP_ENABLED; 1789 } 1790 return 1; 1791 } 1792 1793 return 1; 1794 } 1795 1796 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1797 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1798 { 1799 unsigned status = 0; 1800 int clean_busy; 1801 1802 if (event->status & DEPEVT_STATUS_BUSERR) 1803 status = -ECONNRESET; 1804 1805 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1806 if (clean_busy) 1807 dep->flags &= ~DWC3_EP_BUSY; 1808 1809 /* 1810 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1811 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 1812 */ 1813 if (dwc->revision < DWC3_REVISION_183A) { 1814 u32 reg; 1815 int i; 1816 1817 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1818 dep = dwc->eps[i]; 1819 1820 if (!(dep->flags & DWC3_EP_ENABLED)) 1821 continue; 1822 1823 if (!list_empty(&dep->req_queued)) 1824 return; 1825 } 1826 1827 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1828 reg |= dwc->u1u2; 1829 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1830 1831 dwc->u1u2 = 0; 1832 } 1833 } 1834 1835 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1836 const struct dwc3_event_depevt *event) 1837 { 1838 struct dwc3_ep *dep; 1839 u8 epnum = event->endpoint_number; 1840 1841 dep = dwc->eps[epnum]; 1842 1843 if (!(dep->flags & DWC3_EP_ENABLED)) 1844 return; 1845 1846 if (epnum == 0 || epnum == 1) { 1847 dwc3_ep0_interrupt(dwc, event); 1848 return; 1849 } 1850 1851 switch (event->endpoint_event) { 1852 case DWC3_DEPEVT_XFERCOMPLETE: 1853 dep->resource_index = 0; 1854 1855 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1856 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1857 dep->name); 1858 return; 1859 } 1860 1861 dwc3_endpoint_transfer_complete(dwc, dep, event); 1862 break; 1863 case DWC3_DEPEVT_XFERINPROGRESS: 1864 dwc3_endpoint_transfer_complete(dwc, dep, event); 1865 break; 1866 case DWC3_DEPEVT_XFERNOTREADY: 1867 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1868 dwc3_gadget_start_isoc(dwc, dep, event); 1869 } else { 1870 int ret; 1871 1872 dev_vdbg(dwc->dev, "%s: reason %s\n", 1873 dep->name, event->status & 1874 DEPEVT_STATUS_TRANSFER_ACTIVE 1875 ? "Transfer Active" 1876 : "Transfer Not Active"); 1877 1878 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1879 if (!ret || ret == -EBUSY) 1880 return; 1881 1882 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1883 dep->name); 1884 } 1885 1886 break; 1887 case DWC3_DEPEVT_STREAMEVT: 1888 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 1889 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1890 dep->name); 1891 return; 1892 } 1893 1894 switch (event->status) { 1895 case DEPEVT_STREAMEVT_FOUND: 1896 dev_vdbg(dwc->dev, "Stream %d found and started\n", 1897 event->parameters); 1898 1899 break; 1900 case DEPEVT_STREAMEVT_NOTFOUND: 1901 /* FALLTHROUGH */ 1902 default: 1903 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 1904 } 1905 break; 1906 case DWC3_DEPEVT_RXTXFIFOEVT: 1907 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 1908 break; 1909 case DWC3_DEPEVT_EPCMDCMPLT: 1910 dev_vdbg(dwc->dev, "Endpoint Command Complete\n"); 1911 break; 1912 } 1913 } 1914 1915 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 1916 { 1917 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 1918 spin_unlock(&dwc->lock); 1919 dwc->gadget_driver->disconnect(&dwc->gadget); 1920 spin_lock(&dwc->lock); 1921 } 1922 } 1923 1924 static void dwc3_suspend_gadget(struct dwc3 *dwc) 1925 { 1926 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 1927 spin_unlock(&dwc->lock); 1928 dwc->gadget_driver->suspend(&dwc->gadget); 1929 spin_lock(&dwc->lock); 1930 } 1931 } 1932 1933 static void dwc3_resume_gadget(struct dwc3 *dwc) 1934 { 1935 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 1936 spin_unlock(&dwc->lock); 1937 dwc->gadget_driver->resume(&dwc->gadget); 1938 } 1939 } 1940 1941 static void dwc3_reset_gadget(struct dwc3 *dwc) 1942 { 1943 if (!dwc->gadget_driver) 1944 return; 1945 1946 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 1947 spin_unlock(&dwc->lock); 1948 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 1949 spin_lock(&dwc->lock); 1950 } 1951 } 1952 1953 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 1954 { 1955 struct dwc3_ep *dep; 1956 struct dwc3_gadget_ep_cmd_params params; 1957 u32 cmd; 1958 int ret; 1959 1960 dep = dwc->eps[epnum]; 1961 1962 if (!dep->resource_index) 1963 return; 1964 1965 /* 1966 * NOTICE: We are violating what the Databook says about the 1967 * EndTransfer command. Ideally we would _always_ wait for the 1968 * EndTransfer Command Completion IRQ, but that's causing too 1969 * much trouble synchronizing between us and gadget driver. 1970 * 1971 * We have discussed this with the IP Provider and it was 1972 * suggested to giveback all requests here, but give HW some 1973 * extra time to synchronize with the interconnect. We're using 1974 * an arbitraty 100us delay for that. 1975 * 1976 * Note also that a similar handling was tested by Synopsys 1977 * (thanks a lot Paul) and nothing bad has come out of it. 1978 * In short, what we're doing is: 1979 * 1980 * - Issue EndTransfer WITH CMDIOC bit set 1981 * - Wait 100us 1982 */ 1983 1984 cmd = DWC3_DEPCMD_ENDTRANSFER; 1985 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 1986 cmd |= DWC3_DEPCMD_CMDIOC; 1987 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 1988 memset(¶ms, 0, sizeof(params)); 1989 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 1990 WARN_ON_ONCE(ret); 1991 dep->resource_index = 0; 1992 dep->flags &= ~DWC3_EP_BUSY; 1993 udelay(100); 1994 } 1995 1996 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 1997 { 1998 u32 epnum; 1999 2000 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2001 struct dwc3_ep *dep; 2002 2003 dep = dwc->eps[epnum]; 2004 if (!dep) 2005 continue; 2006 2007 if (!(dep->flags & DWC3_EP_ENABLED)) 2008 continue; 2009 2010 dwc3_remove_requests(dwc, dep); 2011 } 2012 } 2013 2014 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2015 { 2016 u32 epnum; 2017 2018 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2019 struct dwc3_ep *dep; 2020 struct dwc3_gadget_ep_cmd_params params; 2021 int ret; 2022 2023 dep = dwc->eps[epnum]; 2024 if (!dep) 2025 continue; 2026 2027 if (!(dep->flags & DWC3_EP_STALL)) 2028 continue; 2029 2030 dep->flags &= ~DWC3_EP_STALL; 2031 2032 memset(¶ms, 0, sizeof(params)); 2033 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2034 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2035 WARN_ON_ONCE(ret); 2036 } 2037 } 2038 2039 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2040 { 2041 int reg; 2042 2043 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2044 reg &= ~DWC3_DCTL_INITU1ENA; 2045 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2046 2047 reg &= ~DWC3_DCTL_INITU2ENA; 2048 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2049 2050 dwc3_disconnect_gadget(dwc); 2051 dwc->start_config_issued = false; 2052 2053 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2054 dwc->setup_packet_pending = false; 2055 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2056 } 2057 2058 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2059 { 2060 u32 reg; 2061 2062 /* 2063 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2064 * would cause a missing Disconnect Event if there's a 2065 * pending Setup Packet in the FIFO. 2066 * 2067 * There's no suggested workaround on the official Bug 2068 * report, which states that "unless the driver/application 2069 * is doing any special handling of a disconnect event, 2070 * there is no functional issue". 2071 * 2072 * Unfortunately, it turns out that we _do_ some special 2073 * handling of a disconnect event, namely complete all 2074 * pending transfers, notify gadget driver of the 2075 * disconnection, and so on. 2076 * 2077 * Our suggested workaround is to follow the Disconnect 2078 * Event steps here, instead, based on a setup_packet_pending 2079 * flag. Such flag gets set whenever we have a XferNotReady 2080 * event on EP0 and gets cleared on XferComplete for the 2081 * same endpoint. 2082 * 2083 * Refers to: 2084 * 2085 * STAR#9000466709: RTL: Device : Disconnect event not 2086 * generated if setup packet pending in FIFO 2087 */ 2088 if (dwc->revision < DWC3_REVISION_188A) { 2089 if (dwc->setup_packet_pending) 2090 dwc3_gadget_disconnect_interrupt(dwc); 2091 } 2092 2093 dwc3_reset_gadget(dwc); 2094 2095 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2096 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2097 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2098 dwc->test_mode = false; 2099 2100 dwc3_stop_active_transfers(dwc); 2101 dwc3_clear_stall_all_ep(dwc); 2102 dwc->start_config_issued = false; 2103 2104 /* Reset device address to zero */ 2105 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2106 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2107 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2108 } 2109 2110 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 2111 { 2112 u32 reg; 2113 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 2114 2115 /* 2116 * We change the clock only at SS but I dunno why I would want to do 2117 * this. Maybe it becomes part of the power saving plan. 2118 */ 2119 2120 if (speed != DWC3_DSTS_SUPERSPEED) 2121 return; 2122 2123 /* 2124 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2125 * each time on Connect Done. 2126 */ 2127 if (!usb30_clock) 2128 return; 2129 2130 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 2131 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); 2132 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 2133 } 2134 2135 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2136 { 2137 struct dwc3_ep *dep; 2138 int ret; 2139 u32 reg; 2140 u8 speed; 2141 2142 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2143 speed = reg & DWC3_DSTS_CONNECTSPD; 2144 dwc->speed = speed; 2145 2146 dwc3_update_ram_clk_sel(dwc, speed); 2147 2148 switch (speed) { 2149 case DWC3_DCFG_SUPERSPEED: 2150 /* 2151 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2152 * would cause a missing USB3 Reset event. 2153 * 2154 * In such situations, we should force a USB3 Reset 2155 * event by calling our dwc3_gadget_reset_interrupt() 2156 * routine. 2157 * 2158 * Refers to: 2159 * 2160 * STAR#9000483510: RTL: SS : USB3 reset event may 2161 * not be generated always when the link enters poll 2162 */ 2163 if (dwc->revision < DWC3_REVISION_190A) 2164 dwc3_gadget_reset_interrupt(dwc); 2165 2166 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2167 dwc->gadget.ep0->maxpacket = 512; 2168 dwc->gadget.speed = USB_SPEED_SUPER; 2169 break; 2170 case DWC3_DCFG_HIGHSPEED: 2171 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2172 dwc->gadget.ep0->maxpacket = 64; 2173 dwc->gadget.speed = USB_SPEED_HIGH; 2174 break; 2175 case DWC3_DCFG_FULLSPEED2: 2176 case DWC3_DCFG_FULLSPEED1: 2177 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2178 dwc->gadget.ep0->maxpacket = 64; 2179 dwc->gadget.speed = USB_SPEED_FULL; 2180 break; 2181 case DWC3_DCFG_LOWSPEED: 2182 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2183 dwc->gadget.ep0->maxpacket = 8; 2184 dwc->gadget.speed = USB_SPEED_LOW; 2185 break; 2186 } 2187 2188 /* Enable USB2 LPM Capability */ 2189 2190 if ((dwc->revision > DWC3_REVISION_194A) 2191 && (speed != DWC3_DCFG_SUPERSPEED)) { 2192 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2193 reg |= DWC3_DCFG_LPM_CAP; 2194 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2195 2196 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2197 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2198 2199 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2200 2201 /* 2202 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2203 * DCFG.LPMCap is set, core responses with an ACK and the 2204 * BESL value in the LPM token is less than or equal to LPM 2205 * NYET threshold. 2206 */ 2207 if (dwc->revision < DWC3_REVISION_240A && dwc->has_lpm_erratum) 2208 WARN(true, "LPM Erratum not available on dwc3 revisisions < 2.40a\n"); 2209 2210 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2211 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2212 2213 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2214 } else { 2215 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2216 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2217 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2218 } 2219 2220 dep = dwc->eps[0]; 2221 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2222 false); 2223 if (ret) { 2224 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2225 return; 2226 } 2227 2228 dep = dwc->eps[1]; 2229 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2230 false); 2231 if (ret) { 2232 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2233 return; 2234 } 2235 2236 /* 2237 * Configure PHY via GUSB3PIPECTLn if required. 2238 * 2239 * Update GTXFIFOSIZn 2240 * 2241 * In both cases reset values should be sufficient. 2242 */ 2243 } 2244 2245 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2246 { 2247 /* 2248 * TODO take core out of low power mode when that's 2249 * implemented. 2250 */ 2251 2252 dwc->gadget_driver->resume(&dwc->gadget); 2253 } 2254 2255 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2256 unsigned int evtinfo) 2257 { 2258 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2259 unsigned int pwropt; 2260 2261 /* 2262 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2263 * Hibernation mode enabled which would show up when device detects 2264 * host-initiated U3 exit. 2265 * 2266 * In that case, device will generate a Link State Change Interrupt 2267 * from U3 to RESUME which is only necessary if Hibernation is 2268 * configured in. 2269 * 2270 * There are no functional changes due to such spurious event and we 2271 * just need to ignore it. 2272 * 2273 * Refers to: 2274 * 2275 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation 2276 * operational mode 2277 */ 2278 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 2279 if ((dwc->revision < DWC3_REVISION_250A) && 2280 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { 2281 if ((dwc->link_state == DWC3_LINK_STATE_U3) && 2282 (next == DWC3_LINK_STATE_RESUME)) { 2283 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n"); 2284 return; 2285 } 2286 } 2287 2288 /* 2289 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 2290 * on the link partner, the USB session might do multiple entry/exit 2291 * of low power states before a transfer takes place. 2292 * 2293 * Due to this problem, we might experience lower throughput. The 2294 * suggested workaround is to disable DCTL[12:9] bits if we're 2295 * transitioning from U1/U2 to U0 and enable those bits again 2296 * after a transfer completes and there are no pending transfers 2297 * on any of the enabled endpoints. 2298 * 2299 * This is the first half of that workaround. 2300 * 2301 * Refers to: 2302 * 2303 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 2304 * core send LGO_Ux entering U0 2305 */ 2306 if (dwc->revision < DWC3_REVISION_183A) { 2307 if (next == DWC3_LINK_STATE_U0) { 2308 u32 u1u2; 2309 u32 reg; 2310 2311 switch (dwc->link_state) { 2312 case DWC3_LINK_STATE_U1: 2313 case DWC3_LINK_STATE_U2: 2314 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2315 u1u2 = reg & (DWC3_DCTL_INITU2ENA 2316 | DWC3_DCTL_ACCEPTU2ENA 2317 | DWC3_DCTL_INITU1ENA 2318 | DWC3_DCTL_ACCEPTU1ENA); 2319 2320 if (!dwc->u1u2) 2321 dwc->u1u2 = reg & u1u2; 2322 2323 reg &= ~u1u2; 2324 2325 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2326 break; 2327 default: 2328 /* do nothing */ 2329 break; 2330 } 2331 } 2332 } 2333 2334 switch (next) { 2335 case DWC3_LINK_STATE_U1: 2336 if (dwc->speed == USB_SPEED_SUPER) 2337 dwc3_suspend_gadget(dwc); 2338 break; 2339 case DWC3_LINK_STATE_U2: 2340 case DWC3_LINK_STATE_U3: 2341 dwc3_suspend_gadget(dwc); 2342 break; 2343 case DWC3_LINK_STATE_RESUME: 2344 dwc3_resume_gadget(dwc); 2345 break; 2346 default: 2347 /* do nothing */ 2348 break; 2349 } 2350 2351 dwc->link_state = next; 2352 } 2353 2354 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 2355 unsigned int evtinfo) 2356 { 2357 unsigned int is_ss = evtinfo & (1UL << 4); 2358 2359 /** 2360 * WORKAROUND: DWC3 revison 2.20a with hibernation support 2361 * have a known issue which can cause USB CV TD.9.23 to fail 2362 * randomly. 2363 * 2364 * Because of this issue, core could generate bogus hibernation 2365 * events which SW needs to ignore. 2366 * 2367 * Refers to: 2368 * 2369 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2370 * Device Fallback from SuperSpeed 2371 */ 2372 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2373 return; 2374 2375 /* enter hibernation here */ 2376 } 2377 2378 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2379 const struct dwc3_event_devt *event) 2380 { 2381 switch (event->type) { 2382 case DWC3_DEVICE_EVENT_DISCONNECT: 2383 dwc3_gadget_disconnect_interrupt(dwc); 2384 break; 2385 case DWC3_DEVICE_EVENT_RESET: 2386 dwc3_gadget_reset_interrupt(dwc); 2387 break; 2388 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2389 dwc3_gadget_conndone_interrupt(dwc); 2390 break; 2391 case DWC3_DEVICE_EVENT_WAKEUP: 2392 dwc3_gadget_wakeup_interrupt(dwc); 2393 break; 2394 case DWC3_DEVICE_EVENT_HIBER_REQ: 2395 if (!dwc->has_hibernation) { 2396 WARN(1 ,"unexpected hibernation event\n"); 2397 break; 2398 } 2399 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 2400 break; 2401 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2402 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2403 break; 2404 case DWC3_DEVICE_EVENT_EOPF: 2405 dev_vdbg(dwc->dev, "End of Periodic Frame\n"); 2406 break; 2407 case DWC3_DEVICE_EVENT_SOF: 2408 dev_vdbg(dwc->dev, "Start of Periodic Frame\n"); 2409 break; 2410 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2411 dev_vdbg(dwc->dev, "Erratic Error\n"); 2412 break; 2413 case DWC3_DEVICE_EVENT_CMD_CMPL: 2414 dev_vdbg(dwc->dev, "Command Complete\n"); 2415 break; 2416 case DWC3_DEVICE_EVENT_OVERFLOW: 2417 dev_vdbg(dwc->dev, "Overflow\n"); 2418 break; 2419 default: 2420 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2421 } 2422 } 2423 2424 static void dwc3_process_event_entry(struct dwc3 *dwc, 2425 const union dwc3_event *event) 2426 { 2427 /* Endpoint IRQ, handle it and return early */ 2428 if (event->type.is_devspec == 0) { 2429 /* depevt */ 2430 return dwc3_endpoint_interrupt(dwc, &event->depevt); 2431 } 2432 2433 switch (event->type.type) { 2434 case DWC3_EVENT_TYPE_DEV: 2435 dwc3_gadget_interrupt(dwc, &event->devt); 2436 break; 2437 /* REVISIT what to do with Carkit and I2C events ? */ 2438 default: 2439 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2440 } 2441 } 2442 2443 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2444 { 2445 struct dwc3_event_buffer *evt; 2446 irqreturn_t ret = IRQ_NONE; 2447 int left; 2448 u32 reg; 2449 2450 evt = dwc->ev_buffs[buf]; 2451 left = evt->count; 2452 2453 if (!(evt->flags & DWC3_EVENT_PENDING)) 2454 return IRQ_NONE; 2455 2456 while (left > 0) { 2457 union dwc3_event event; 2458 2459 event.raw = *(u32 *) (evt->buf + evt->lpos); 2460 2461 dwc3_process_event_entry(dwc, &event); 2462 2463 /* 2464 * FIXME we wrap around correctly to the next entry as 2465 * almost all entries are 4 bytes in size. There is one 2466 * entry which has 12 bytes which is a regular entry 2467 * followed by 8 bytes data. ATM I don't know how 2468 * things are organized if we get next to the a 2469 * boundary so I worry about that once we try to handle 2470 * that. 2471 */ 2472 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2473 left -= 4; 2474 2475 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2476 } 2477 2478 evt->count = 0; 2479 evt->flags &= ~DWC3_EVENT_PENDING; 2480 ret = IRQ_HANDLED; 2481 2482 /* Unmask interrupt */ 2483 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2484 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2485 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2486 2487 return ret; 2488 } 2489 2490 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) 2491 { 2492 struct dwc3 *dwc = _dwc; 2493 unsigned long flags; 2494 irqreturn_t ret = IRQ_NONE; 2495 int i; 2496 2497 spin_lock_irqsave(&dwc->lock, flags); 2498 2499 for (i = 0; i < dwc->num_event_buffers; i++) 2500 ret |= dwc3_process_event_buf(dwc, i); 2501 2502 spin_unlock_irqrestore(&dwc->lock, flags); 2503 2504 return ret; 2505 } 2506 2507 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf) 2508 { 2509 struct dwc3_event_buffer *evt; 2510 u32 count; 2511 u32 reg; 2512 2513 evt = dwc->ev_buffs[buf]; 2514 2515 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2516 count &= DWC3_GEVNTCOUNT_MASK; 2517 if (!count) 2518 return IRQ_NONE; 2519 2520 evt->count = count; 2521 evt->flags |= DWC3_EVENT_PENDING; 2522 2523 /* Mask interrupt */ 2524 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2525 reg |= DWC3_GEVNTSIZ_INTMASK; 2526 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2527 2528 return IRQ_WAKE_THREAD; 2529 } 2530 2531 static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2532 { 2533 struct dwc3 *dwc = _dwc; 2534 int i; 2535 irqreturn_t ret = IRQ_NONE; 2536 2537 spin_lock(&dwc->lock); 2538 2539 for (i = 0; i < dwc->num_event_buffers; i++) { 2540 irqreturn_t status; 2541 2542 status = dwc3_check_event_buf(dwc, i); 2543 if (status == IRQ_WAKE_THREAD) 2544 ret = status; 2545 } 2546 2547 spin_unlock(&dwc->lock); 2548 2549 return ret; 2550 } 2551 2552 /** 2553 * dwc3_gadget_init - Initializes gadget related registers 2554 * @dwc: pointer to our controller context structure 2555 * 2556 * Returns 0 on success otherwise negative errno. 2557 */ 2558 int dwc3_gadget_init(struct dwc3 *dwc) 2559 { 2560 int ret; 2561 2562 dwc->ctrl_req = dma_alloc_coherent(sizeof(*dwc->ctrl_req), 2563 (unsigned long *)&dwc->ctrl_req_addr); 2564 if (!dwc->ctrl_req) { 2565 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2566 ret = -ENOMEM; 2567 goto err0; 2568 } 2569 2570 dwc->ep0_trb = dma_alloc_coherent(sizeof(*dwc->ep0_trb) * 2, 2571 (unsigned long *)&dwc->ep0_trb_addr); 2572 if (!dwc->ep0_trb) { 2573 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2574 ret = -ENOMEM; 2575 goto err1; 2576 } 2577 2578 dwc->setup_buf = memalign(CONFIG_SYS_CACHELINE_SIZE, 2579 DWC3_EP0_BOUNCE_SIZE); 2580 if (!dwc->setup_buf) { 2581 ret = -ENOMEM; 2582 goto err2; 2583 } 2584 2585 dwc->ep0_bounce = dma_alloc_coherent(DWC3_EP0_BOUNCE_SIZE, 2586 (unsigned long *)&dwc->ep0_bounce_addr); 2587 if (!dwc->ep0_bounce) { 2588 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2589 ret = -ENOMEM; 2590 goto err3; 2591 } 2592 2593 dwc->gadget.ops = &dwc3_gadget_ops; 2594 dwc->gadget.max_speed = USB_SPEED_SUPER; 2595 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2596 dwc->gadget.name = "dwc3-gadget"; 2597 2598 /* 2599 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2600 * on ep out. 2601 */ 2602 dwc->gadget.quirk_ep_out_aligned_size = true; 2603 2604 /* 2605 * REVISIT: Here we should clear all pending IRQs to be 2606 * sure we're starting from a well known location. 2607 */ 2608 2609 ret = dwc3_gadget_init_endpoints(dwc); 2610 if (ret) 2611 goto err4; 2612 2613 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2614 if (ret) { 2615 dev_err(dwc->dev, "failed to register udc\n"); 2616 goto err4; 2617 } 2618 2619 return 0; 2620 2621 err4: 2622 dwc3_gadget_free_endpoints(dwc); 2623 dma_free_coherent(dwc->ep0_bounce); 2624 2625 err3: 2626 kfree(dwc->setup_buf); 2627 2628 err2: 2629 dma_free_coherent(dwc->ep0_trb); 2630 2631 err1: 2632 dma_free_coherent(dwc->ctrl_req); 2633 2634 err0: 2635 return ret; 2636 } 2637 2638 /* -------------------------------------------------------------------------- */ 2639 2640 void dwc3_gadget_exit(struct dwc3 *dwc) 2641 { 2642 usb_del_gadget_udc(&dwc->gadget); 2643 2644 dwc3_gadget_free_endpoints(dwc); 2645 2646 dma_free_coherent(dwc->ep0_bounce); 2647 2648 kfree(dwc->setup_buf); 2649 2650 dma_free_coherent(dwc->ep0_trb); 2651 2652 dma_free_coherent(dwc->ctrl_req); 2653 } 2654 2655 /** 2656 * dwc3_gadget_uboot_handle_interrupt - handle dwc3 gadget interrupt 2657 * @dwc: struct dwce * 2658 * 2659 * Handles ep0 and gadget interrupt 2660 * 2661 * Should be called from dwc3 core. 2662 */ 2663 void dwc3_gadget_uboot_handle_interrupt(struct dwc3 *dwc) 2664 { 2665 int ret = dwc3_interrupt(0, dwc); 2666 2667 if (ret == IRQ_WAKE_THREAD) { 2668 int i; 2669 struct dwc3_event_buffer *evt; 2670 2671 dwc3_thread_interrupt(0, dwc); 2672 2673 /* Clean + Invalidate the buffers after touching them */ 2674 for (i = 0; i < dwc->num_event_buffers; i++) { 2675 evt = dwc->ev_buffs[i]; 2676 dwc3_flush_cache((uintptr_t)evt->buf, evt->length); 2677 } 2678 } 2679 } 2680