/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if wrong Test Selector
 * is passed
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in the coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In an ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
		int		mult = 1;
		int		tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
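
/*
 * Worked example of the sizing above (illustrative numbers only, not tied
 * to any particular core configuration): with a 64-bit master bus, mdwidth
 * is 8 bytes. A bulk endpoint with a 1024 byte wMaxPacketSize gets
 * mult = 3, so:
 *
 *	tmp       = 3 * (1024 + 8) + 8 = 3104 bytes
 *	fifo_size = DIV_ROUND_UP(3104, 8) = 388 MDWIDTH-words
 *
 * The depth ends up in the lower 16 bits of GTXFIFOSIZ(n), while the
 * running offset (last_fifo_depth) is packed into the upper 16 bits.
 */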

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;
	int				i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
				DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);
	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32		timeout = 500;
	u32		reg;

	trace_dwc3_gadget_generic_cmd(cmd, param);

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	trace_dwc3_gadget_ep_cmd(dep, cmd, params);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	}

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
			restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum	= dep->number;
	req->dep	= dep;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3		*dwc = dep->dwc;
	struct dwc3_trb		*trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");


	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;
	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If the busy and free slots are equal then the ring is either full
	 * or empty. If we are starting to process requests then we are
	 * empty. Otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_is_last(&req->list,
							&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}
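
/*
 * Note on the slot accounting used by dwc3_prepare_trbs() above
 * (illustrative, assuming the default of 32 TRBs per endpoint):
 * free_slot advances whenever a TRB is prepared, busy_slot advances
 * whenever one completes, so
 *
 *	trbs_left = (busy_slot - free_slot) & DWC3_TRB_MASK
 *
 * e.g. busy_slot = 0 and free_slot = 3 leaves (0 - 3) & 31 = 29 free
 * TRBs. When both counters are equal the ring is either completely
 * empty or completely full, which is why the "starting" flag is needed
 * to tell the two cases apart.
 */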

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests\n",
			dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* schedule the transfer 4 intervals into the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}
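
/*
 * Example of the (micro)frame math above (illustrative): an isochronous
 * endpoint with bInterval = 4 gets dep->interval = 1 << (4 - 1) = 8.
 * The XferNotReady parameter is masked with ~(8 - 1) to align it to an
 * interval boundary, and the transfer is then scheduled
 * dep->interval * 4 = 32 (micro)frames ahead of that, giving the
 * software some headroom before the first interval is due.
 */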

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
	 *    right away, otherwise host will not know we have streams to be
	 *    handled.
	 */
	if (dep->stream_capable) {
		int	ret;

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY) {
			struct dwc3	*dwc = dep->dwc;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
				request, req->dep->name)) {
		ret = -EINVAL;
		goto out;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
			request, ep->name, request->length);
	trace_dwc3_ep_queue(req);

	ret = __dwc3_gadget_ep_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
				(!list_empty(&dep->req_queued) ||
					!list_empty(&dep->request_list)))) {
			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
					dep->name);
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}
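
/*
 * Note: the "protocol" argument above distinguishes a protocol stall
 * (typically requested from the control-transfer handling) from a
 * functional halt requested via usb_ep_set_halt(). Only the functional
 * case is refused with -EAGAIN while transfers are still pending; a
 * protocol stall is always issued.
 */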

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
	unsigned long			flags;
	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	u32			reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	unsigned long		timeout;
	unsigned long		flags;

	u32			reg;

	int			ret = 0;

	u8			link_state;
	u8			speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook, a Remote Wakeup request should
	 * be issued only when the device is in Early Suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	int			irq;
	u32			reg;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err0;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err1;
	}

	dwc->gadget_driver	= driver;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/**
	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than SuperSpeed.
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (dwc->maximum_speed) {
		case USB_SPEED_LOW:
			reg |= DWC3_DSTS_LOWSPEED;
			break;
		case USB_SPEED_FULL:
			reg |= DWC3_DSTS_FULLSPEED1;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DSTS_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:	/* FALLTHROUGH */
		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
		default:
			reg |= DWC3_DSTS_SUPERSPEED;
		}
	}
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err2;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err3;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err3:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err2:
	dwc->gadget_driver = NULL;

err1:
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(irq, dwc);

err0:
	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			irq;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver	= NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	free_irq(irq, dwc);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
		u8 num, u32 direction)
{
	struct dwc3_ep			*dep;
	u8				i;

	for (i = 0; i < num; i++) {
		u8 epnum = (i << 1) | (!!direction);

		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep)
			return -ENOMEM;

		dep->dwc = dwc;
		dep->number = epnum;
		dep->direction = !!direction;
		dwc->eps[epnum] = dep;

		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");

		dep->endpoint.name = dep->name;

		dev_vdbg(dwc->dev, "initializing %s\n", dep->name);

		if (epnum == 0 || epnum == 1) {
			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
			dep->endpoint.maxburst = 1;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int		ret;

			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	int				ret;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
		return ret;
	}

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
		return ret;
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list. Due to that,
		 * we shouldn't do these two operations otherwise we would end
		 * up with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status)
{
	unsigned int		count;
	unsigned int		s_pkt = 0;
	unsigned int		trb_status;

	trace_dwc3_complete_trb(dep, trb);

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach the
				 * next request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		slot;
	unsigned int		i;
	int			ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number, true);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		/* re-acquire the lock dropped around the callback */
		spin_lock(&dwc->lock);
	}
}

static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}
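
/*
 * The helpers above drop dwc->lock around the gadget driver callbacks
 * because those callbacks may call back into this driver (for example to
 * queue or dequeue requests), which would otherwise deadlock on the same
 * lock. dwc3_gadget_giveback() follows the same unlock/relock pattern.
 */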

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	if (!dep->resource_index)
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is:
	 *
	 * - Issue EndTransfer WITH CMDIOC bit set
	 * - Wait 100us
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;
	udelay(100);
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int			reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. That flag gets set whenever we have an XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS, although it is not entirely clear
	 * why this is needed. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

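/*
 * dwc3_gadget_conndone_interrupt - handle a Connect Done event. Reads the
 * negotiated speed from DSTS, updates the RAM clock selection, programs
 * ep0's wMaxPacketSize for that speed, configures USB2 LPM capability where
 * applicable and re-enables both directions of endpoint 0.
 */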
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A)
			&& (speed != DWC3_DCFG_SUPERSPEED)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * With dwc3 revisions >= 2.40a, when the LPM Erratum is
		 * enabled and DCFG.LPMCap is set, the core responds with
		 * an ACK if the BESL value in the LPM token is less than
		 * or equal to the LPM NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	/* use the helper so a missing gadget driver or resume hook is handled */
	dwc3_resume_gadget(dwc);
}

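/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event.
 * Filters out spurious Resume events on revisions < 2.50a configured without
 * Hibernation, applies the first half of the U1/U2 throughput workaround for
 * revisions < 1.83a, and forwards suspend/resume notifications to the gadget
 * driver before recording the new link state.
 */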
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int		pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entries
	 * into and exits from low power states before a transfer takes
	 * place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

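/*
 * dwc3_gadget_hibernation_interrupt - handle a Hibernation Request event.
 * Bogus events are filtered out (see the workaround note below); actual
 * entry into hibernation is not implemented yet.
 */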
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		dwc3_endpoint_interrupt(dwc, &event->depevt);
		return;
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

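/*
 * dwc3_process_event_buf - drain one event buffer using the count snapshot
 * taken by the top-half handler. Runs in the threaded interrupt handler with
 * dwc->lock held; acknowledges each 4-byte entry in GEVNTCOUNT and unmasks
 * the event interrupt once the buffer is empty.
 */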
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	evt = dwc->ev_buffs[buf];
	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes: a regular entry followed by
		 * 8 bytes of data. It is not yet clear how such an entry
		 * is laid out near the buffer boundary, so revisit this
		 * once we try to handle that entry type.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return ret;
}

static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++)
		ret |= dwc3_process_event_buf(dwc, i);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;
	u32 reg;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dwc->lock);

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_check_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	spin_unlock(&dwc->lock);

	return ret;
}

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->gadget.ops = &dwc3_gadget_ops;
	dwc->gadget.max_speed = USB_SPEED_SUPER;
	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported = true;
	dwc->gadget.name = "dwc3-gadget";

	/*
	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
	 * on ep out.
	 */
	dwc->gadget.quirk_ep_out_aligned_size = true;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err4;
	}

	return 0;

err4:
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

err3:
	kfree(dwc->setup_buf);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	if (dwc->pullups_connected) {
		dwc3_gadget_disable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true, true);
	}

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err0;

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err1;

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);

	if (dwc->pullups_connected) {
		dwc3_gadget_enable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true, false);
	}

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}