1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. 
This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 dev_vdbg(dwc->dev, "link state change request timed out\n"); 143 144 return -ETIMEDOUT; 145 } 146 147 /** 148 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 149 * @dwc: pointer to our context structure 150 * 151 * This function will a best effort FIFO allocation in order 152 * to improve FIFO usage and throughput, while still allowing 153 * us to enable as many endpoints as possible. 154 * 155 * Keep in mind that this operation will be highly dependent 156 * on the configured size for RAM1 - which contains TxFifo -, 157 * the amount of endpoints enabled on coreConsultant tool, and 158 * the width of the Master Bus. 159 * 160 * In the ideal world, we would always be able to satisfy the 161 * following equation: 162 * 163 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 164 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 165 * 166 * Unfortunately, due to many variables that's not always the case. 
167 */ 168 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 169 { 170 int last_fifo_depth = 0; 171 int ram1_depth; 172 int fifo_size; 173 int mdwidth; 174 int num; 175 176 if (!dwc->needs_fifo_resize) 177 return 0; 178 179 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); 180 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 181 182 /* MDWIDTH is represented in bits, we need it in bytes */ 183 mdwidth >>= 3; 184 185 /* 186 * FIXME For now we will only allocate 1 wMaxPacketSize space 187 * for each enabled endpoint, later patches will come to 188 * improve this algorithm so that we better use the internal 189 * FIFO space 190 */ 191 for (num = 0; num < dwc->num_in_eps; num++) { 192 /* bit0 indicates direction; 1 means IN ep */ 193 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; 194 int mult = 1; 195 int tmp; 196 197 if (!(dep->flags & DWC3_EP_ENABLED)) 198 continue; 199 200 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 201 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 202 mult = 3; 203 204 /* 205 * REVISIT: the following assumes we will always have enough 206 * space available on the FIFO RAM for all possible use cases. 207 * Make sure that's true somehow and change FIFO allocation 208 * accordingly. 209 * 210 * If we have Bulk or Isochronous endpoints, we want 211 * them to be able to be very, very fast. 
So we're giving 212 * those endpoints a fifo_size which is enough for 3 full 213 * packets 214 */ 215 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 216 tmp += mdwidth; 217 218 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 219 220 fifo_size |= (last_fifo_depth << 16); 221 222 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n", 223 dep->name, last_fifo_depth, fifo_size & 0xffff); 224 225 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 226 227 last_fifo_depth += (fifo_size & 0xffff); 228 } 229 230 return 0; 231 } 232 233 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 234 int status) 235 { 236 struct dwc3 *dwc = dep->dwc; 237 int i; 238 239 if (req->queued) { 240 i = 0; 241 do { 242 dep->busy_slot++; 243 /* 244 * Skip LINK TRB. We can't use req->trb and check for 245 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 246 * just completed (not the LINK TRB). 247 */ 248 if (((dep->busy_slot & DWC3_TRB_MASK) == 249 DWC3_TRB_NUM- 1) && 250 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 251 dep->busy_slot++; 252 } while(++i < req->request.num_mapped_sgs); 253 req->queued = false; 254 } 255 list_del(&req->list); 256 req->trb = NULL; 257 258 if (req->request.status == -EINPROGRESS) 259 req->request.status = status; 260 261 if (dwc->ep0_bounced && dep->number == 0) 262 dwc->ep0_bounced = false; 263 else 264 usb_gadget_unmap_request(&dwc->gadget, &req->request, 265 req->direction); 266 267 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 268 req, dep->name, req->request.actual, 269 req->request.length, status); 270 trace_dwc3_gadget_giveback(req); 271 272 spin_unlock(&dwc->lock); 273 usb_gadget_giveback_request(&dep->endpoint, &req->request); 274 spin_lock(&dwc->lock); 275 } 276 277 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 278 { 279 u32 timeout = 500; 280 u32 reg; 281 282 trace_dwc3_gadget_generic_cmd(cmd, param); 283 284 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 285 dwc3_writel(dwc->regs, 
DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 286 287 do { 288 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 289 if (!(reg & DWC3_DGCMD_CMDACT)) { 290 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 291 DWC3_DGCMD_STATUS(reg)); 292 return 0; 293 } 294 295 /* 296 * We can't sleep here, because it's also called from 297 * interrupt context. 298 */ 299 timeout--; 300 if (!timeout) 301 return -ETIMEDOUT; 302 udelay(1); 303 } while (1); 304 } 305 306 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 307 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 308 { 309 struct dwc3_ep *dep = dwc->eps[ep]; 310 u32 timeout = 500; 311 u32 reg; 312 313 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 314 315 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 316 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 317 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 318 319 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 320 do { 321 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 322 if (!(reg & DWC3_DEPCMD_CMDACT)) { 323 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 324 DWC3_DEPCMD_STATUS(reg)); 325 return 0; 326 } 327 328 /* 329 * We can't sleep here, because it is also called from 330 * interrupt context. 
331 */ 332 timeout--; 333 if (!timeout) 334 return -ETIMEDOUT; 335 336 udelay(1); 337 } while (1); 338 } 339 340 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 341 struct dwc3_trb *trb) 342 { 343 u32 offset = (char *) trb - (char *) dep->trb_pool; 344 345 return dep->trb_pool_dma + offset; 346 } 347 348 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 349 { 350 struct dwc3 *dwc = dep->dwc; 351 352 if (dep->trb_pool) 353 return 0; 354 355 if (dep->number == 0 || dep->number == 1) 356 return 0; 357 358 dep->trb_pool = dma_alloc_coherent(dwc->dev, 359 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 360 &dep->trb_pool_dma, GFP_KERNEL); 361 if (!dep->trb_pool) { 362 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 363 dep->name); 364 return -ENOMEM; 365 } 366 367 return 0; 368 } 369 370 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 371 { 372 struct dwc3 *dwc = dep->dwc; 373 374 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 375 dep->trb_pool, dep->trb_pool_dma); 376 377 dep->trb_pool = NULL; 378 dep->trb_pool_dma = 0; 379 } 380 381 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 382 { 383 struct dwc3_gadget_ep_cmd_params params; 384 u32 cmd; 385 386 memset(¶ms, 0x00, sizeof(params)); 387 388 if (dep->number != 1) { 389 cmd = DWC3_DEPCMD_DEPSTARTCFG; 390 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 391 if (dep->number > 1) { 392 if (dwc->start_config_issued) 393 return 0; 394 dwc->start_config_issued = true; 395 cmd |= DWC3_DEPCMD_PARAM(2); 396 } 397 398 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 399 } 400 401 return 0; 402 } 403 404 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 405 const struct usb_endpoint_descriptor *desc, 406 const struct usb_ss_ep_comp_descriptor *comp_desc, 407 bool ignore, bool restore) 408 { 409 struct dwc3_gadget_ep_cmd_params params; 410 411 memset(¶ms, 0x00, sizeof(params)); 412 413 params.param0 = 
DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 414 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 415 416 /* Burst size is only needed in SuperSpeed mode */ 417 if (dwc->gadget.speed == USB_SPEED_SUPER) { 418 u32 burst = dep->endpoint.maxburst - 1; 419 420 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 421 } 422 423 if (ignore) 424 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 425 426 if (restore) { 427 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 428 params.param2 |= dep->saved_state; 429 } 430 431 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 432 | DWC3_DEPCFG_XFER_NOT_READY_EN; 433 434 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 435 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 436 | DWC3_DEPCFG_STREAM_EVENT_EN; 437 dep->stream_capable = true; 438 } 439 440 if (!usb_endpoint_xfer_control(desc)) 441 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 442 443 /* 444 * We are doing 1:1 mapping for endpoints, meaning 445 * Physical Endpoints 2 maps to Logical Endpoint 2 and 446 * so on. We consider the direction bit as part of the physical 447 * endpoint number. So USB endpoint 0x81 is 0x03. 
448 */ 449 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 450 451 /* 452 * We must use the lower 16 TX FIFOs even though 453 * HW might have more 454 */ 455 if (dep->direction) 456 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 457 458 if (desc->bInterval) { 459 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 460 dep->interval = 1 << (desc->bInterval - 1); 461 } 462 463 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 464 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 465 } 466 467 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 468 { 469 struct dwc3_gadget_ep_cmd_params params; 470 471 memset(¶ms, 0x00, sizeof(params)); 472 473 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 474 475 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 476 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 477 } 478 479 /** 480 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 481 * @dep: endpoint to be initialized 482 * @desc: USB Endpoint Descriptor 483 * 484 * Caller should take care of locking 485 */ 486 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 487 const struct usb_endpoint_descriptor *desc, 488 const struct usb_ss_ep_comp_descriptor *comp_desc, 489 bool ignore, bool restore) 490 { 491 struct dwc3 *dwc = dep->dwc; 492 u32 reg; 493 int ret; 494 495 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name); 496 497 if (!(dep->flags & DWC3_EP_ENABLED)) { 498 ret = dwc3_gadget_start_config(dwc, dep); 499 if (ret) 500 return ret; 501 } 502 503 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 504 restore); 505 if (ret) 506 return ret; 507 508 if (!(dep->flags & DWC3_EP_ENABLED)) { 509 struct dwc3_trb *trb_st_hw; 510 struct dwc3_trb *trb_link; 511 512 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 513 if (ret) 514 return ret; 515 516 dep->endpoint.desc = desc; 517 dep->comp_desc = comp_desc; 518 dep->type = usb_endpoint_type(desc); 519 dep->flags |= DWC3_EP_ENABLED; 520 521 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 522 reg 
|= DWC3_DALEPENA_EP(dep->number); 523 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 524 525 if (!usb_endpoint_xfer_isoc(desc)) 526 return 0; 527 528 /* Link TRB for ISOC. The HWO bit is never reset */ 529 trb_st_hw = &dep->trb_pool[0]; 530 531 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 532 memset(trb_link, 0, sizeof(*trb_link)); 533 534 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 535 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 536 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 537 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 538 } 539 540 return 0; 541 } 542 543 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 544 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 545 { 546 struct dwc3_request *req; 547 548 if (!list_empty(&dep->req_queued)) { 549 dwc3_stop_active_transfer(dwc, dep->number, true); 550 551 /* - giveback all requests to gadget driver */ 552 while (!list_empty(&dep->req_queued)) { 553 req = next_request(&dep->req_queued); 554 555 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 556 } 557 } 558 559 while (!list_empty(&dep->request_list)) { 560 req = next_request(&dep->request_list); 561 562 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 563 } 564 } 565 566 /** 567 * __dwc3_gadget_ep_disable - Disables a HW endpoint 568 * @dep: the endpoint to disable 569 * 570 * This function also removes requests which are currently processed ny the 571 * hardware and those which are not yet scheduled. 572 * Caller should take care of locking. 
573 */ 574 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 575 { 576 struct dwc3 *dwc = dep->dwc; 577 u32 reg; 578 579 dwc3_remove_requests(dwc, dep); 580 581 /* make sure HW endpoint isn't stalled */ 582 if (dep->flags & DWC3_EP_STALL) 583 __dwc3_gadget_ep_set_halt(dep, 0, false); 584 585 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 586 reg &= ~DWC3_DALEPENA_EP(dep->number); 587 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 588 589 dep->stream_capable = false; 590 dep->endpoint.desc = NULL; 591 dep->comp_desc = NULL; 592 dep->type = 0; 593 dep->flags = 0; 594 595 return 0; 596 } 597 598 /* -------------------------------------------------------------------------- */ 599 600 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 601 const struct usb_endpoint_descriptor *desc) 602 { 603 return -EINVAL; 604 } 605 606 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 607 { 608 return -EINVAL; 609 } 610 611 /* -------------------------------------------------------------------------- */ 612 613 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 614 const struct usb_endpoint_descriptor *desc) 615 { 616 struct dwc3_ep *dep; 617 struct dwc3 *dwc; 618 unsigned long flags; 619 int ret; 620 621 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 622 pr_debug("dwc3: invalid parameters\n"); 623 return -EINVAL; 624 } 625 626 if (!desc->wMaxPacketSize) { 627 pr_debug("dwc3: missing wMaxPacketSize\n"); 628 return -EINVAL; 629 } 630 631 dep = to_dwc3_ep(ep); 632 dwc = dep->dwc; 633 634 if (dep->flags & DWC3_EP_ENABLED) { 635 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", 636 dep->name); 637 return 0; 638 } 639 640 switch (usb_endpoint_type(desc)) { 641 case USB_ENDPOINT_XFER_CONTROL: 642 strlcat(dep->name, "-control", sizeof(dep->name)); 643 break; 644 case USB_ENDPOINT_XFER_ISOC: 645 strlcat(dep->name, "-isoc", sizeof(dep->name)); 646 break; 647 case USB_ENDPOINT_XFER_BULK: 648 strlcat(dep->name, "-bulk", sizeof(dep->name)); 649 break; 650 case 
USB_ENDPOINT_XFER_INT: 651 strlcat(dep->name, "-int", sizeof(dep->name)); 652 break; 653 default: 654 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 655 } 656 657 spin_lock_irqsave(&dwc->lock, flags); 658 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 659 spin_unlock_irqrestore(&dwc->lock, flags); 660 661 return ret; 662 } 663 664 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 665 { 666 struct dwc3_ep *dep; 667 struct dwc3 *dwc; 668 unsigned long flags; 669 int ret; 670 671 if (!ep) { 672 pr_debug("dwc3: invalid parameters\n"); 673 return -EINVAL; 674 } 675 676 dep = to_dwc3_ep(ep); 677 dwc = dep->dwc; 678 679 if (!(dep->flags & DWC3_EP_ENABLED)) { 680 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 681 dep->name); 682 return 0; 683 } 684 685 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 686 dep->number >> 1, 687 (dep->number & 1) ? "in" : "out"); 688 689 spin_lock_irqsave(&dwc->lock, flags); 690 ret = __dwc3_gadget_ep_disable(dep); 691 spin_unlock_irqrestore(&dwc->lock, flags); 692 693 return ret; 694 } 695 696 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 697 gfp_t gfp_flags) 698 { 699 struct dwc3_request *req; 700 struct dwc3_ep *dep = to_dwc3_ep(ep); 701 702 req = kzalloc(sizeof(*req), gfp_flags); 703 if (!req) 704 return NULL; 705 706 req->epnum = dep->number; 707 req->dep = dep; 708 709 trace_dwc3_alloc_request(req); 710 711 return &req->request; 712 } 713 714 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 715 struct usb_request *request) 716 { 717 struct dwc3_request *req = to_dwc3_request(request); 718 719 trace_dwc3_free_request(req); 720 kfree(req); 721 } 722 723 /** 724 * dwc3_prepare_one_trb - setup one TRB from one request 725 * @dep: endpoint for which this request is prepared 726 * @req: dwc3_request pointer 727 */ 728 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 729 struct dwc3_request *req, dma_addr_t dma, 730 unsigned length, unsigned last, 
unsigned chain, unsigned node) 731 { 732 struct dwc3 *dwc = dep->dwc; 733 struct dwc3_trb *trb; 734 735 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", 736 dep->name, req, (unsigned long long) dma, 737 length, last ? " last" : "", 738 chain ? " chain" : ""); 739 740 741 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 742 743 if (!req->trb) { 744 dwc3_gadget_move_request_queued(req); 745 req->trb = trb; 746 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 747 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 748 } 749 750 dep->free_slot++; 751 /* Skip the LINK-TRB on ISOC */ 752 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 753 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 754 dep->free_slot++; 755 756 trb->size = DWC3_TRB_SIZE_LENGTH(length); 757 trb->bpl = lower_32_bits(dma); 758 trb->bph = upper_32_bits(dma); 759 760 switch (usb_endpoint_type(dep->endpoint.desc)) { 761 case USB_ENDPOINT_XFER_CONTROL: 762 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 763 break; 764 765 case USB_ENDPOINT_XFER_ISOC: 766 if (!node) 767 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 768 else 769 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 770 break; 771 772 case USB_ENDPOINT_XFER_BULK: 773 case USB_ENDPOINT_XFER_INT: 774 trb->ctrl = DWC3_TRBCTL_NORMAL; 775 break; 776 default: 777 /* 778 * This is only possible with faulty memory because we 779 * checked it already :) 780 */ 781 BUG(); 782 } 783 784 if (!req->request.no_interrupt && !chain) 785 trb->ctrl |= DWC3_TRB_CTRL_IOC; 786 787 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 788 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 789 trb->ctrl |= DWC3_TRB_CTRL_CSP; 790 } else if (last) { 791 trb->ctrl |= DWC3_TRB_CTRL_LST; 792 } 793 794 if (chain) 795 trb->ctrl |= DWC3_TRB_CTRL_CHN; 796 797 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 798 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 799 800 trb->ctrl |= DWC3_TRB_CTRL_HWO; 801 802 trace_dwc3_prepare_trb(dep, trb); 803 } 804 805 /* 806 * 
dwc3_prepare_trbs - setup TRBs from requests 807 * @dep: endpoint for which requests are being prepared 808 * @starting: true if the endpoint is idle and no requests are queued. 809 * 810 * The function goes through the requests list and sets up TRBs for the 811 * transfers. The function returns once there are no more TRBs available or 812 * it runs out of requests. 813 */ 814 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 815 { 816 struct dwc3_request *req, *n; 817 u32 trbs_left; 818 u32 max; 819 unsigned int last_one = 0; 820 821 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 822 823 /* the first request must not be queued */ 824 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 825 826 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 827 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 828 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 829 if (trbs_left > max) 830 trbs_left = max; 831 } 832 833 /* 834 * If busy & slot are equal than it is either full or empty. If we are 835 * starting to process requests then we are empty. Otherwise we are 836 * full and don't do anything 837 */ 838 if (!trbs_left) { 839 if (!starting) 840 return; 841 trbs_left = DWC3_TRB_NUM; 842 /* 843 * In case we start from scratch, we queue the ISOC requests 844 * starting from slot 1. This is done because we use ring 845 * buffer and have no LST bit to stop us. Instead, we place 846 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 847 * after the first request so we start at slot 1 and have 848 * 7 requests proceed before we hit the first IOC. 849 * Other transfer types don't use the ring buffer and are 850 * processed from the first TRB until the last one. Since we 851 * don't wrap around we have to start at the beginning. 
852 */ 853 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 854 dep->busy_slot = 1; 855 dep->free_slot = 1; 856 } else { 857 dep->busy_slot = 0; 858 dep->free_slot = 0; 859 } 860 } 861 862 /* The last TRB is a link TRB, not used for xfer */ 863 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 864 return; 865 866 list_for_each_entry_safe(req, n, &dep->request_list, list) { 867 unsigned length; 868 dma_addr_t dma; 869 last_one = false; 870 871 if (req->request.num_mapped_sgs > 0) { 872 struct usb_request *request = &req->request; 873 struct scatterlist *sg = request->sg; 874 struct scatterlist *s; 875 int i; 876 877 for_each_sg(sg, s, request->num_mapped_sgs, i) { 878 unsigned chain = true; 879 880 length = sg_dma_len(s); 881 dma = sg_dma_address(s); 882 883 if (i == (request->num_mapped_sgs - 1) || 884 sg_is_last(s)) { 885 if (list_is_last(&req->list, 886 &dep->request_list)) 887 last_one = true; 888 chain = false; 889 } 890 891 trbs_left--; 892 if (!trbs_left) 893 last_one = true; 894 895 if (last_one) 896 chain = false; 897 898 dwc3_prepare_one_trb(dep, req, dma, length, 899 last_one, chain, i); 900 901 if (last_one) 902 break; 903 } 904 } else { 905 dma = req->request.dma; 906 length = req->request.length; 907 trbs_left--; 908 909 if (!trbs_left) 910 last_one = 1; 911 912 /* Is this the last request? 
*/ 913 if (list_is_last(&req->list, &dep->request_list)) 914 last_one = 1; 915 916 dwc3_prepare_one_trb(dep, req, dma, length, 917 last_one, false, 0); 918 919 if (last_one) 920 break; 921 } 922 } 923 } 924 925 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 926 int start_new) 927 { 928 struct dwc3_gadget_ep_cmd_params params; 929 struct dwc3_request *req; 930 struct dwc3 *dwc = dep->dwc; 931 int ret; 932 u32 cmd; 933 934 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 935 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); 936 return -EBUSY; 937 } 938 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 939 940 /* 941 * If we are getting here after a short-out-packet we don't enqueue any 942 * new requests as we try to set the IOC bit only on the last request. 943 */ 944 if (start_new) { 945 if (list_empty(&dep->req_queued)) 946 dwc3_prepare_trbs(dep, start_new); 947 948 /* req points to the first request which will be sent */ 949 req = next_request(&dep->req_queued); 950 } else { 951 dwc3_prepare_trbs(dep, start_new); 952 953 /* 954 * req points to the first request where HWO changed from 0 to 1 955 */ 956 req = next_request(&dep->req_queued); 957 } 958 if (!req) { 959 dep->flags |= DWC3_EP_PENDING_REQUEST; 960 return 0; 961 } 962 963 memset(¶ms, 0, sizeof(params)); 964 965 if (start_new) { 966 params.param0 = upper_32_bits(req->trb_dma); 967 params.param1 = lower_32_bits(req->trb_dma); 968 cmd = DWC3_DEPCMD_STARTTRANSFER; 969 } else { 970 cmd = DWC3_DEPCMD_UPDATETRANSFER; 971 } 972 973 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 974 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 975 if (ret < 0) { 976 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 977 978 /* 979 * FIXME we need to iterate over the list of requests 980 * here and stop, unmap, free and del each of the linked 981 * requests instead of what we do now. 
982 */ 983 usb_gadget_unmap_request(&dwc->gadget, &req->request, 984 req->direction); 985 list_del(&req->list); 986 return ret; 987 } 988 989 dep->flags |= DWC3_EP_BUSY; 990 991 if (start_new) { 992 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 993 dep->number); 994 WARN_ON_ONCE(!dep->resource_index); 995 } 996 997 return 0; 998 } 999 1000 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1001 struct dwc3_ep *dep, u32 cur_uf) 1002 { 1003 u32 uf; 1004 1005 if (list_empty(&dep->request_list)) { 1006 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", 1007 dep->name); 1008 dep->flags |= DWC3_EP_PENDING_REQUEST; 1009 return; 1010 } 1011 1012 /* 4 micro frames in the future */ 1013 uf = cur_uf + dep->interval * 4; 1014 1015 __dwc3_gadget_kick_transfer(dep, uf, 1); 1016 } 1017 1018 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1019 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1020 { 1021 u32 cur_uf, mask; 1022 1023 mask = ~(dep->interval - 1); 1024 cur_uf = event->parameters & mask; 1025 1026 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1027 } 1028 1029 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1030 { 1031 struct dwc3 *dwc = dep->dwc; 1032 int ret; 1033 1034 req->request.actual = 0; 1035 req->request.status = -EINPROGRESS; 1036 req->direction = dep->direction; 1037 req->epnum = dep->number; 1038 1039 /* 1040 * We only add to our list of requests now and 1041 * start consuming the list once we get XferNotReady 1042 * IRQ. 1043 * 1044 * That way, we avoid doing anything that we don't need 1045 * to do now and defer it until the point we receive a 1046 * particular token from the Host side. 1047 * 1048 * This will also avoid Host cancelling URBs due to too 1049 * many NAKs. 
1050 */ 1051 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1052 dep->direction); 1053 if (ret) 1054 return ret; 1055 1056 list_add_tail(&req->list, &dep->request_list); 1057 1058 /* 1059 * There are a few special cases: 1060 * 1061 * 1. XferNotReady with empty list of requests. We need to kick the 1062 * transfer here in that situation, otherwise we will be NAKing 1063 * forever. If we get XferNotReady before gadget driver has a 1064 * chance to queue a request, we will ACK the IRQ but won't be 1065 * able to receive the data until the next request is queued. 1066 * The following code is handling exactly that. 1067 * 1068 */ 1069 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1070 /* 1071 * If xfernotready is already elapsed and it is a case 1072 * of isoc transfer, then issue END TRANSFER, so that 1073 * you can receive xfernotready again and can have 1074 * notion of current microframe. 1075 */ 1076 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1077 if (list_empty(&dep->req_queued)) { 1078 dwc3_stop_active_transfer(dwc, dep->number, true); 1079 dep->flags = DWC3_EP_ENABLED; 1080 } 1081 return 0; 1082 } 1083 1084 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1085 if (ret && ret != -EBUSY) 1086 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1087 dep->name); 1088 return ret; 1089 } 1090 1091 /* 1092 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1093 * kick the transfer here after queuing a request, otherwise the 1094 * core may not see the modified TRB(s). 1095 */ 1096 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1097 (dep->flags & DWC3_EP_BUSY) && 1098 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1099 WARN_ON_ONCE(!dep->resource_index); 1100 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1101 false); 1102 if (ret && ret != -EBUSY) 1103 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1104 dep->name); 1105 return ret; 1106 } 1107 1108 /* 1109 * 4. Stream Capable Bulk Endpoints. 
We need to start the transfer
	 * right away, otherwise host will not know we have streams to be
	 * handled.
	 */
	if (dep->stream_capable) {
		int ret;

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY) {
			struct dwc3 *dwc = dep->dwc;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}
	}

	return 0;
}

/*
 * usb_ep_ops ->queue callback for non-control endpoints: rejects requests
 * queued to a disabled endpoint (-ESHUTDOWN), then hands the request to
 * __dwc3_gadget_ep_queue() under the controller lock.
 */
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -ESHUTDOWN;
	}

	/* NOTE(review): "queing" typo is in the runtime message; left as-is */
	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
			request, ep->name, request->length);
	trace_dwc3_ep_queue(req);

	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * usb_ep_ops ->dequeue callback: look for @request on the pending list
 * (request_list) first, then on the started list (req_queued).  A started
 * request has its active transfer stopped before giveback; in both found
 * cases the request is given back with -ECONNRESET.  Returns -EINVAL if
 * the request was never queued to this endpoint.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * Set or clear the STALL condition on @dep via the SETSTALL/CLEARSTALL
 * endpoint commands.  Refuses isochronous endpoints (-EINVAL) and, for
 * non-protocol (function-initiated) stalls, endpoints with pending or
 * started requests (-EAGAIN).  Clearing a stall also clears the wedge
 * flag.  Caller must hold dwc->lock.
 */
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
				(!list_empty(&dep->req_queued) ||
				 !list_empty(&dep->request_list)))) {
			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
					dep->name);
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

/* usb_ep_ops ->set_halt callback: locked wrapper, non-protocol stall. */
static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops ->set_wedge callback: halt the endpoint and mark it wedged. */
static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep =
to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	/* ep0 has its own halt path; all other endpoints use the generic one */
	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* -------------------------------------------------------------------------- */

/* Template descriptor for ep0; wMaxPacketSize is patched per link speed. */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

/* Operations for the bi-directional control endpoint (physical eps 0/1). */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* Operations for all non-control endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

/* usb_gadget_ops ->get_frame: read the (micro)frame number from DSTS. */
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

/*
 * usb_gadget_ops ->wakeup: issue a remote-wakeup request.  Only valid when
 * not connected at SuperSpeed and when the link is in RX_DET or U3; puts
 * the link into Recovery and then polls (up to 100ms) for U0.
 */
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	unsigned long timeout;
	unsigned long flags;

	u32 reg;

	int ret = 0;

	u8 link_state;
	u8 speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int
dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	/* usb_gadget_ops ->set_selfpowered: just latch the flag under lock */
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

/*
 * Set or clear DCTL.RUN_STOP and poll DSTS.DEVCTRLHLT (up to ~500us) until
 * the core acknowledges the new state.  @suspend controls whether
 * KEEP_CONNECT is preserved on stop when hibernation is supported.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32 reg;
	u32 timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		/* old cores need the link forced toward RX_DET on start */
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}

/* usb_gadget_ops ->pullup: locked wrapper around dwc3_gadget_run_stop(). */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* Enable the device-event interrupts we care about in DEVTEN. */
static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32 reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/*
 * usb_gadget_ops ->udc_start: request the (shared, threaded) IRQ, bind the
 * gadget driver, program maximum speed into DCFG, enable both halves of
 * ep0 and start listening for SETUP packets.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct dwc3_ep *dep;
	unsigned long flags;
	int ret = 0;
	int irq;
	u32 reg;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err0;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err1;
	}

dwc->gadget_driver = driver; 1541 1542 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1543 reg &= ~(DWC3_DCFG_SPEED_MASK); 1544 1545 /** 1546 * WORKAROUND: DWC3 revision < 2.20a have an issue 1547 * which would cause metastability state on Run/Stop 1548 * bit if we try to force the IP to USB2-only mode. 1549 * 1550 * Because of that, we cannot configure the IP to any 1551 * speed other than the SuperSpeed 1552 * 1553 * Refers to: 1554 * 1555 * STAR#9000525659: Clock Domain Crossing on DCTL in 1556 * USB 2.0 Mode 1557 */ 1558 if (dwc->revision < DWC3_REVISION_220A) { 1559 reg |= DWC3_DCFG_SUPERSPEED; 1560 } else { 1561 switch (dwc->maximum_speed) { 1562 case USB_SPEED_LOW: 1563 reg |= DWC3_DSTS_LOWSPEED; 1564 break; 1565 case USB_SPEED_FULL: 1566 reg |= DWC3_DSTS_FULLSPEED1; 1567 break; 1568 case USB_SPEED_HIGH: 1569 reg |= DWC3_DSTS_HIGHSPEED; 1570 break; 1571 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1572 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1573 default: 1574 reg |= DWC3_DSTS_SUPERSPEED; 1575 } 1576 } 1577 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1578 1579 dwc->start_config_issued = false; 1580 1581 /* Start with SuperSpeed Default */ 1582 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1583 1584 dep = dwc->eps[0]; 1585 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1586 false); 1587 if (ret) { 1588 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1589 goto err2; 1590 } 1591 1592 dep = dwc->eps[1]; 1593 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1594 false); 1595 if (ret) { 1596 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1597 goto err3; 1598 } 1599 1600 /* begin to receive SETUP packets */ 1601 dwc->ep0state = EP0_SETUP_PHASE; 1602 dwc3_ep0_out_start(dwc); 1603 1604 dwc3_gadget_enable_irq(dwc); 1605 1606 spin_unlock_irqrestore(&dwc->lock, flags); 1607 1608 return 0; 1609 1610 err3: 1611 __dwc3_gadget_ep_disable(dwc->eps[0]); 1612 1613 err2: 1614 dwc->gadget_driver = NULL; 1615 1616 err1: 1617 
spin_unlock_irqrestore(&dwc->lock, flags); 1618 1619 free_irq(irq, dwc); 1620 1621 err0: 1622 return ret; 1623 } 1624 1625 static int dwc3_gadget_stop(struct usb_gadget *g, 1626 struct usb_gadget_driver *driver) 1627 { 1628 struct dwc3 *dwc = gadget_to_dwc(g); 1629 unsigned long flags; 1630 int irq; 1631 1632 spin_lock_irqsave(&dwc->lock, flags); 1633 1634 dwc3_gadget_disable_irq(dwc); 1635 __dwc3_gadget_ep_disable(dwc->eps[0]); 1636 __dwc3_gadget_ep_disable(dwc->eps[1]); 1637 1638 dwc->gadget_driver = NULL; 1639 1640 spin_unlock_irqrestore(&dwc->lock, flags); 1641 1642 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1643 free_irq(irq, dwc); 1644 1645 return 0; 1646 } 1647 1648 static const struct usb_gadget_ops dwc3_gadget_ops = { 1649 .get_frame = dwc3_gadget_get_frame, 1650 .wakeup = dwc3_gadget_wakeup, 1651 .set_selfpowered = dwc3_gadget_set_selfpowered, 1652 .pullup = dwc3_gadget_pullup, 1653 .udc_start = dwc3_gadget_start, 1654 .udc_stop = dwc3_gadget_stop, 1655 }; 1656 1657 /* -------------------------------------------------------------------------- */ 1658 1659 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1660 u8 num, u32 direction) 1661 { 1662 struct dwc3_ep *dep; 1663 u8 i; 1664 1665 for (i = 0; i < num; i++) { 1666 u8 epnum = (i << 1) | (!!direction); 1667 1668 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1669 if (!dep) 1670 return -ENOMEM; 1671 1672 dep->dwc = dwc; 1673 dep->number = epnum; 1674 dep->direction = !!direction; 1675 dwc->eps[epnum] = dep; 1676 1677 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1678 (epnum & 1) ? 
"in" : "out");

		dep->endpoint.name = dep->name;

		dev_vdbg(dwc->dev, "initializing %s\n", dep->name);

		if (epnum == 0 || epnum == 1) {
			/* physical eps 0/1 form the control endpoint */
			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
			dep->endpoint.maxburst = 1;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int ret;

			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}

/* Allocate all OUT then all IN physical endpoints. */
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	int ret;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
		return ret;
	}

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
		return ret;
	}

	return 0;
}

/* Undo dwc3_gadget_init_endpoints(): free TRB pools and endpoint structs. */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them the endpoints list. Due to that, we
		 * shouldn't do these two operations otherwise we would end up
		 * with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

/*
 * Process one completed TRB for @req.  Accumulates the transferred byte
 * count into the request and reports (via the return value) whether this
 * TRB terminates the request: returns 1 on short packet, on last-TRB
 * completion, or on IOC completion; 0 otherwise.
 */
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status)
{
	unsigned int count;
	unsigned int s_pkt = 0;
	unsigned int trb_status;

	trace_dwc3_complete_trb(dep, trb);

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Lets hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued request
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in request_list during
				 * giveback.If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * request in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

/*
 * Walk the started-request list, retire each request's TRBs (handling the
 * isoc link-TRB slot wrap) and give completed requests back.  For isoc
 * endpoints with nothing left queued, either flag PENDING (empty pending
 * list) or end the transfer so a fresh one can be started.  Always
 * returns 1 ("busy state cleaned").
 */
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb *trb;
	unsigned int slot;
	unsigned int i;
	int ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			/* slot DWC3_TRB_NUM-1 holds the isoc link TRB; skip it */
			if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number, true);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	return 1;
}

/*
 * Handle XferComplete/XferInProgress: retire done requests, clear BUSY,
 * and (on <1.83a cores) re-enable the U1/U2 bits saved by the linksts
 * handler once no endpoint has started requests (2nd half of workaround).
 */
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	unsigned status = 0;
	int clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32 reg;
		int i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

/* Dispatch a per-endpoint event to ep0 handling or the proper case below. */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
		break;
	}
}

/* Call the gadget driver's ->disconnect with dwc->lock dropped. */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/* Call the gadget driver's ->suspend with dwc->lock dropped. */
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void
dwc3_resume_gadget(struct dwc3 *dwc) 2033 { 2034 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2035 spin_unlock(&dwc->lock); 2036 dwc->gadget_driver->resume(&dwc->gadget); 2037 spin_lock(&dwc->lock); 2038 } 2039 } 2040 2041 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2042 { 2043 struct dwc3_ep *dep; 2044 struct dwc3_gadget_ep_cmd_params params; 2045 u32 cmd; 2046 int ret; 2047 2048 dep = dwc->eps[epnum]; 2049 2050 if (!dep->resource_index) 2051 return; 2052 2053 /* 2054 * NOTICE: We are violating what the Databook says about the 2055 * EndTransfer command. Ideally we would _always_ wait for the 2056 * EndTransfer Command Completion IRQ, but that's causing too 2057 * much trouble synchronizing between us and gadget driver. 2058 * 2059 * We have discussed this with the IP Provider and it was 2060 * suggested to giveback all requests here, but give HW some 2061 * extra time to synchronize with the interconnect. We're using 2062 * an arbitraty 100us delay for that. 2063 * 2064 * Note also that a similar handling was tested by Synopsys 2065 * (thanks a lot Paul) and nothing bad has come out of it. 2066 * In short, what we're doing is: 2067 * 2068 * - Issue EndTransfer WITH CMDIOC bit set 2069 * - Wait 100us 2070 */ 2071 2072 cmd = DWC3_DEPCMD_ENDTRANSFER; 2073 cmd |= force ? 
DWC3_DEPCMD_HIPRI_FORCERM : 0; 2074 cmd |= DWC3_DEPCMD_CMDIOC; 2075 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2076 memset(¶ms, 0, sizeof(params)); 2077 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 2078 WARN_ON_ONCE(ret); 2079 dep->resource_index = 0; 2080 dep->flags &= ~DWC3_EP_BUSY; 2081 udelay(100); 2082 } 2083 2084 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 2085 { 2086 u32 epnum; 2087 2088 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2089 struct dwc3_ep *dep; 2090 2091 dep = dwc->eps[epnum]; 2092 if (!dep) 2093 continue; 2094 2095 if (!(dep->flags & DWC3_EP_ENABLED)) 2096 continue; 2097 2098 dwc3_remove_requests(dwc, dep); 2099 } 2100 } 2101 2102 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2103 { 2104 u32 epnum; 2105 2106 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2107 struct dwc3_ep *dep; 2108 struct dwc3_gadget_ep_cmd_params params; 2109 int ret; 2110 2111 dep = dwc->eps[epnum]; 2112 if (!dep) 2113 continue; 2114 2115 if (!(dep->flags & DWC3_EP_STALL)) 2116 continue; 2117 2118 dep->flags &= ~DWC3_EP_STALL; 2119 2120 memset(¶ms, 0, sizeof(params)); 2121 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2122 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2123 WARN_ON_ONCE(ret); 2124 } 2125 } 2126 2127 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2128 { 2129 int reg; 2130 2131 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2132 reg &= ~DWC3_DCTL_INITU1ENA; 2133 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2134 2135 reg &= ~DWC3_DCTL_INITU2ENA; 2136 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2137 2138 dwc3_disconnect_gadget(dwc); 2139 dwc->start_config_issued = false; 2140 2141 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2142 dwc->setup_packet_pending = false; 2143 } 2144 2145 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2146 { 2147 u32 reg; 2148 2149 /* 2150 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2151 * would cause a missing Disconnect Event if there's a 2152 * pending Setup Packet in the 
FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* leave any USB2 test mode */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

/* Reprogram GCTL.RAMClkSel for SuperSpeed connections. */
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

/*
 * Connect Done event: latch the negotiated speed from DSTS, size ep0's
 * descriptor accordingly, enable USB2 LPM capability where applicable and
 * (re-)enable both halves of ep0.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A)
			&& (speed != DWC3_DCFG_SUPERSPEED)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		/*
		 * TODO: This should be configurable.
For now using
		 * maximum allowed HIRD threshold value of 0b1100
		 */
		reg |= DWC3_DCTL_HIRD_THRES(12);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

/* Wakeup event: forward resume to the gadget driver. */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	/* NOTE(review): called without the NULL/->resume checks the other
	 * notifiers use — presumably only reachable while bound; confirm. */
	dwc->gadget_driver->resume(&dwc->gadget);
}

/*
 * Link State Change event: filter known-spurious transitions, apply the
 * U1/U2->U0 throughput workaround (1st half, see
 * dwc3_endpoint_transfer_complete() for the 2nd half) and notify the
 * gadget driver of suspend/resume.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* save the enabled bits for the 2nd half */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/**
	 * WORKAROUND: DWC3 revison 2.20a with hibernation support
	 * have a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
2449 * 2450 * Refers to: 2451 * 2452 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2453 * Device Fallback from SuperSpeed 2454 */ 2455 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2456 return; 2457 2458 /* enter hibernation here */ 2459 } 2460 2461 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2462 const struct dwc3_event_devt *event) 2463 { 2464 switch (event->type) { 2465 case DWC3_DEVICE_EVENT_DISCONNECT: 2466 dwc3_gadget_disconnect_interrupt(dwc); 2467 break; 2468 case DWC3_DEVICE_EVENT_RESET: 2469 dwc3_gadget_reset_interrupt(dwc); 2470 break; 2471 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2472 dwc3_gadget_conndone_interrupt(dwc); 2473 break; 2474 case DWC3_DEVICE_EVENT_WAKEUP: 2475 dwc3_gadget_wakeup_interrupt(dwc); 2476 break; 2477 case DWC3_DEVICE_EVENT_HIBER_REQ: 2478 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 2479 "unexpected hibernation event\n")) 2480 break; 2481 2482 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 2483 break; 2484 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2485 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2486 break; 2487 case DWC3_DEVICE_EVENT_EOPF: 2488 dev_vdbg(dwc->dev, "End of Periodic Frame\n"); 2489 break; 2490 case DWC3_DEVICE_EVENT_SOF: 2491 dev_vdbg(dwc->dev, "Start of Periodic Frame\n"); 2492 break; 2493 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2494 dev_vdbg(dwc->dev, "Erratic Error\n"); 2495 break; 2496 case DWC3_DEVICE_EVENT_CMD_CMPL: 2497 dev_vdbg(dwc->dev, "Command Complete\n"); 2498 break; 2499 case DWC3_DEVICE_EVENT_OVERFLOW: 2500 dev_vdbg(dwc->dev, "Overflow\n"); 2501 break; 2502 default: 2503 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2504 } 2505 } 2506 2507 static void dwc3_process_event_entry(struct dwc3 *dwc, 2508 const union dwc3_event *event) 2509 { 2510 trace_dwc3_event(event->raw); 2511 2512 /* Endpoint IRQ, handle it and return early */ 2513 if (event->type.is_devspec == 0) { 2514 /* depevt */ 2515 return dwc3_endpoint_interrupt(dwc, &event->depevt); 
2516 } 2517 2518 switch (event->type.type) { 2519 case DWC3_EVENT_TYPE_DEV: 2520 dwc3_gadget_interrupt(dwc, &event->devt); 2521 break; 2522 /* REVISIT what to do with Carkit and I2C events ? */ 2523 default: 2524 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2525 } 2526 } 2527 2528 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2529 { 2530 struct dwc3_event_buffer *evt; 2531 irqreturn_t ret = IRQ_NONE; 2532 int left; 2533 u32 reg; 2534 2535 evt = dwc->ev_buffs[buf]; 2536 left = evt->count; 2537 2538 if (!(evt->flags & DWC3_EVENT_PENDING)) 2539 return IRQ_NONE; 2540 2541 while (left > 0) { 2542 union dwc3_event event; 2543 2544 event.raw = *(u32 *) (evt->buf + evt->lpos); 2545 2546 dwc3_process_event_entry(dwc, &event); 2547 2548 /* 2549 * FIXME we wrap around correctly to the next entry as 2550 * almost all entries are 4 bytes in size. There is one 2551 * entry which has 12 bytes which is a regular entry 2552 * followed by 8 bytes data. ATM I don't know how 2553 * things are organized if we get next to the a 2554 * boundary so I worry about that once we try to handle 2555 * that. 
2556 */ 2557 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2558 left -= 4; 2559 2560 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2561 } 2562 2563 evt->count = 0; 2564 evt->flags &= ~DWC3_EVENT_PENDING; 2565 ret = IRQ_HANDLED; 2566 2567 /* Unmask interrupt */ 2568 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2569 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2570 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2571 2572 return ret; 2573 } 2574 2575 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) 2576 { 2577 struct dwc3 *dwc = _dwc; 2578 unsigned long flags; 2579 irqreturn_t ret = IRQ_NONE; 2580 int i; 2581 2582 spin_lock_irqsave(&dwc->lock, flags); 2583 2584 for (i = 0; i < dwc->num_event_buffers; i++) 2585 ret |= dwc3_process_event_buf(dwc, i); 2586 2587 spin_unlock_irqrestore(&dwc->lock, flags); 2588 2589 return ret; 2590 } 2591 2592 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf) 2593 { 2594 struct dwc3_event_buffer *evt; 2595 u32 count; 2596 u32 reg; 2597 2598 evt = dwc->ev_buffs[buf]; 2599 2600 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2601 count &= DWC3_GEVNTCOUNT_MASK; 2602 if (!count) 2603 return IRQ_NONE; 2604 2605 evt->count = count; 2606 evt->flags |= DWC3_EVENT_PENDING; 2607 2608 /* Mask interrupt */ 2609 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2610 reg |= DWC3_GEVNTSIZ_INTMASK; 2611 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2612 2613 return IRQ_WAKE_THREAD; 2614 } 2615 2616 static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2617 { 2618 struct dwc3 *dwc = _dwc; 2619 int i; 2620 irqreturn_t ret = IRQ_NONE; 2621 2622 spin_lock(&dwc->lock); 2623 2624 for (i = 0; i < dwc->num_event_buffers; i++) { 2625 irqreturn_t status; 2626 2627 status = dwc3_check_event_buf(dwc, i); 2628 if (status == IRQ_WAKE_THREAD) 2629 ret = status; 2630 } 2631 2632 spin_unlock(&dwc->lock); 2633 2634 return ret; 2635 } 2636 2637 /** 2638 * dwc3_gadget_init - Initializes gadget related registers 2639 * @dwc: pointer 
to our controller context structure 2640 * 2641 * Returns 0 on success otherwise negative errno. 2642 */ 2643 int dwc3_gadget_init(struct dwc3 *dwc) 2644 { 2645 int ret; 2646 2647 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2648 &dwc->ctrl_req_addr, GFP_KERNEL); 2649 if (!dwc->ctrl_req) { 2650 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2651 ret = -ENOMEM; 2652 goto err0; 2653 } 2654 2655 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2656 &dwc->ep0_trb_addr, GFP_KERNEL); 2657 if (!dwc->ep0_trb) { 2658 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2659 ret = -ENOMEM; 2660 goto err1; 2661 } 2662 2663 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2664 if (!dwc->setup_buf) { 2665 ret = -ENOMEM; 2666 goto err2; 2667 } 2668 2669 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2670 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2671 GFP_KERNEL); 2672 if (!dwc->ep0_bounce) { 2673 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2674 ret = -ENOMEM; 2675 goto err3; 2676 } 2677 2678 dwc->gadget.ops = &dwc3_gadget_ops; 2679 dwc->gadget.max_speed = USB_SPEED_SUPER; 2680 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2681 dwc->gadget.sg_supported = true; 2682 dwc->gadget.name = "dwc3-gadget"; 2683 2684 /* 2685 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2686 * on ep out. 2687 */ 2688 dwc->gadget.quirk_ep_out_aligned_size = true; 2689 2690 /* 2691 * REVISIT: Here we should clear all pending IRQs to be 2692 * sure we're starting from a well known location. 
2693 */ 2694 2695 ret = dwc3_gadget_init_endpoints(dwc); 2696 if (ret) 2697 goto err4; 2698 2699 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2700 if (ret) { 2701 dev_err(dwc->dev, "failed to register udc\n"); 2702 goto err4; 2703 } 2704 2705 return 0; 2706 2707 err4: 2708 dwc3_gadget_free_endpoints(dwc); 2709 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2710 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2711 2712 err3: 2713 kfree(dwc->setup_buf); 2714 2715 err2: 2716 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2717 dwc->ep0_trb, dwc->ep0_trb_addr); 2718 2719 err1: 2720 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2721 dwc->ctrl_req, dwc->ctrl_req_addr); 2722 2723 err0: 2724 return ret; 2725 } 2726 2727 /* -------------------------------------------------------------------------- */ 2728 2729 void dwc3_gadget_exit(struct dwc3 *dwc) 2730 { 2731 usb_del_gadget_udc(&dwc->gadget); 2732 2733 dwc3_gadget_free_endpoints(dwc); 2734 2735 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2736 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2737 2738 kfree(dwc->setup_buf); 2739 2740 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2741 dwc->ep0_trb, dwc->ep0_trb_addr); 2742 2743 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2744 dwc->ctrl_req, dwc->ctrl_req_addr); 2745 } 2746 2747 int dwc3_gadget_prepare(struct dwc3 *dwc) 2748 { 2749 if (dwc->pullups_connected) { 2750 dwc3_gadget_disable_irq(dwc); 2751 dwc3_gadget_run_stop(dwc, true, true); 2752 } 2753 2754 return 0; 2755 } 2756 2757 void dwc3_gadget_complete(struct dwc3 *dwc) 2758 { 2759 if (dwc->pullups_connected) { 2760 dwc3_gadget_enable_irq(dwc); 2761 dwc3_gadget_run_stop(dwc, true, false); 2762 } 2763 } 2764 2765 int dwc3_gadget_suspend(struct dwc3 *dwc) 2766 { 2767 __dwc3_gadget_ep_disable(dwc->eps[0]); 2768 __dwc3_gadget_ep_disable(dwc->eps[1]); 2769 2770 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG); 2771 2772 return 0; 2773 } 2774 2775 int dwc3_gadget_resume(struct dwc3 *dwc) 2776 { 2777 struct 
dwc3_ep *dep; 2778 int ret; 2779 2780 /* Start with SuperSpeed Default */ 2781 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2782 2783 dep = dwc->eps[0]; 2784 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2785 false); 2786 if (ret) 2787 goto err0; 2788 2789 dep = dwc->eps[1]; 2790 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2791 false); 2792 if (ret) 2793 goto err1; 2794 2795 /* begin to receive SETUP packets */ 2796 dwc->ep0state = EP0_SETUP_PHASE; 2797 dwc3_ep0_out_start(dwc); 2798 2799 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg); 2800 2801 return 0; 2802 2803 err1: 2804 __dwc3_gadget_ep_disable(dwc->eps[0]); 2805 2806 err0: 2807 return ret; 2808 } 2809