1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. 
This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 dev_vdbg(dwc->dev, "link state change request timed out\n"); 143 144 return -ETIMEDOUT; 145 } 146 147 /** 148 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 149 * @dwc: pointer to our context structure 150 * 151 * This function will a best effort FIFO allocation in order 152 * to improve FIFO usage and throughput, while still allowing 153 * us to enable as many endpoints as possible. 154 * 155 * Keep in mind that this operation will be highly dependent 156 * on the configured size for RAM1 - which contains TxFifo -, 157 * the amount of endpoints enabled on coreConsultant tool, and 158 * the width of the Master Bus. 159 * 160 * In the ideal world, we would always be able to satisfy the 161 * following equation: 162 * 163 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 164 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 165 * 166 * Unfortunately, due to many variables that's not always the case. 
167 */ 168 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 169 { 170 int last_fifo_depth = 0; 171 int ram1_depth; 172 int fifo_size; 173 int mdwidth; 174 int num; 175 176 if (!dwc->needs_fifo_resize) 177 return 0; 178 179 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); 180 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 181 182 /* MDWIDTH is represented in bits, we need it in bytes */ 183 mdwidth >>= 3; 184 185 /* 186 * FIXME For now we will only allocate 1 wMaxPacketSize space 187 * for each enabled endpoint, later patches will come to 188 * improve this algorithm so that we better use the internal 189 * FIFO space 190 */ 191 for (num = 0; num < dwc->num_in_eps; num++) { 192 /* bit0 indicates direction; 1 means IN ep */ 193 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; 194 int mult = 1; 195 int tmp; 196 197 if (!(dep->flags & DWC3_EP_ENABLED)) 198 continue; 199 200 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 201 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 202 mult = 3; 203 204 /* 205 * REVISIT: the following assumes we will always have enough 206 * space available on the FIFO RAM for all possible use cases. 207 * Make sure that's true somehow and change FIFO allocation 208 * accordingly. 209 * 210 * If we have Bulk or Isochronous endpoints, we want 211 * them to be able to be very, very fast. 
So we're giving 212 * those endpoints a fifo_size which is enough for 3 full 213 * packets 214 */ 215 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 216 tmp += mdwidth; 217 218 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 219 220 fifo_size |= (last_fifo_depth << 16); 221 222 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n", 223 dep->name, last_fifo_depth, fifo_size & 0xffff); 224 225 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 226 227 last_fifo_depth += (fifo_size & 0xffff); 228 } 229 230 return 0; 231 } 232 233 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 234 int status) 235 { 236 struct dwc3 *dwc = dep->dwc; 237 int i; 238 239 if (req->queued) { 240 i = 0; 241 do { 242 dep->busy_slot++; 243 /* 244 * Skip LINK TRB. We can't use req->trb and check for 245 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 246 * just completed (not the LINK TRB). 247 */ 248 if (((dep->busy_slot & DWC3_TRB_MASK) == 249 DWC3_TRB_NUM- 1) && 250 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 251 dep->busy_slot++; 252 } while(++i < req->request.num_mapped_sgs); 253 req->queued = false; 254 } 255 list_del(&req->list); 256 req->trb = NULL; 257 258 if (req->request.status == -EINPROGRESS) 259 req->request.status = status; 260 261 if (dwc->ep0_bounced && dep->number == 0) 262 dwc->ep0_bounced = false; 263 else 264 usb_gadget_unmap_request(&dwc->gadget, &req->request, 265 req->direction); 266 267 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 268 req, dep->name, req->request.actual, 269 req->request.length, status); 270 trace_dwc3_gadget_giveback(req); 271 272 spin_unlock(&dwc->lock); 273 usb_gadget_giveback_request(&dep->endpoint, &req->request); 274 spin_lock(&dwc->lock); 275 } 276 277 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 278 { 279 u32 timeout = 500; 280 u32 reg; 281 282 trace_dwc3_gadget_generic_cmd(cmd, param); 283 284 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 285 dwc3_writel(dwc->regs, 
DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 286 287 do { 288 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 289 if (!(reg & DWC3_DGCMD_CMDACT)) { 290 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 291 DWC3_DGCMD_STATUS(reg)); 292 return 0; 293 } 294 295 /* 296 * We can't sleep here, because it's also called from 297 * interrupt context. 298 */ 299 timeout--; 300 if (!timeout) 301 return -ETIMEDOUT; 302 udelay(1); 303 } while (1); 304 } 305 306 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 307 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 308 { 309 struct dwc3_ep *dep = dwc->eps[ep]; 310 u32 timeout = 500; 311 u32 reg; 312 313 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 314 315 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 316 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 317 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 318 319 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 320 do { 321 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 322 if (!(reg & DWC3_DEPCMD_CMDACT)) { 323 dev_vdbg(dwc->dev, "Command Complete --> %d\n", 324 DWC3_DEPCMD_STATUS(reg)); 325 return 0; 326 } 327 328 /* 329 * We can't sleep here, because it is also called from 330 * interrupt context. 
331 */ 332 timeout--; 333 if (!timeout) 334 return -ETIMEDOUT; 335 336 udelay(1); 337 } while (1); 338 } 339 340 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 341 struct dwc3_trb *trb) 342 { 343 u32 offset = (char *) trb - (char *) dep->trb_pool; 344 345 return dep->trb_pool_dma + offset; 346 } 347 348 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 349 { 350 struct dwc3 *dwc = dep->dwc; 351 352 if (dep->trb_pool) 353 return 0; 354 355 if (dep->number == 0 || dep->number == 1) 356 return 0; 357 358 dep->trb_pool = dma_alloc_coherent(dwc->dev, 359 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 360 &dep->trb_pool_dma, GFP_KERNEL); 361 if (!dep->trb_pool) { 362 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 363 dep->name); 364 return -ENOMEM; 365 } 366 367 return 0; 368 } 369 370 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 371 { 372 struct dwc3 *dwc = dep->dwc; 373 374 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 375 dep->trb_pool, dep->trb_pool_dma); 376 377 dep->trb_pool = NULL; 378 dep->trb_pool_dma = 0; 379 } 380 381 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 382 { 383 struct dwc3_gadget_ep_cmd_params params; 384 u32 cmd; 385 386 memset(¶ms, 0x00, sizeof(params)); 387 388 if (dep->number != 1) { 389 cmd = DWC3_DEPCMD_DEPSTARTCFG; 390 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 391 if (dep->number > 1) { 392 if (dwc->start_config_issued) 393 return 0; 394 dwc->start_config_issued = true; 395 cmd |= DWC3_DEPCMD_PARAM(2); 396 } 397 398 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 399 } 400 401 return 0; 402 } 403 404 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 405 const struct usb_endpoint_descriptor *desc, 406 const struct usb_ss_ep_comp_descriptor *comp_desc, 407 bool ignore, bool restore) 408 { 409 struct dwc3_gadget_ep_cmd_params params; 410 411 memset(¶ms, 0x00, sizeof(params)); 412 413 params.param0 = 
DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 414 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 415 416 /* Burst size is only needed in SuperSpeed mode */ 417 if (dwc->gadget.speed == USB_SPEED_SUPER) { 418 u32 burst = dep->endpoint.maxburst - 1; 419 420 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 421 } 422 423 if (ignore) 424 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 425 426 if (restore) { 427 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 428 params.param2 |= dep->saved_state; 429 } 430 431 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 432 | DWC3_DEPCFG_XFER_NOT_READY_EN; 433 434 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 435 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 436 | DWC3_DEPCFG_STREAM_EVENT_EN; 437 dep->stream_capable = true; 438 } 439 440 if (!usb_endpoint_xfer_control(desc)) 441 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 442 443 /* 444 * We are doing 1:1 mapping for endpoints, meaning 445 * Physical Endpoints 2 maps to Logical Endpoint 2 and 446 * so on. We consider the direction bit as part of the physical 447 * endpoint number. So USB endpoint 0x81 is 0x03. 
448 */ 449 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 450 451 /* 452 * We must use the lower 16 TX FIFOs even though 453 * HW might have more 454 */ 455 if (dep->direction) 456 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 457 458 if (desc->bInterval) { 459 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 460 dep->interval = 1 << (desc->bInterval - 1); 461 } 462 463 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 464 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 465 } 466 467 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 468 { 469 struct dwc3_gadget_ep_cmd_params params; 470 471 memset(¶ms, 0x00, sizeof(params)); 472 473 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 474 475 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 476 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 477 } 478 479 /** 480 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 481 * @dep: endpoint to be initialized 482 * @desc: USB Endpoint Descriptor 483 * 484 * Caller should take care of locking 485 */ 486 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 487 const struct usb_endpoint_descriptor *desc, 488 const struct usb_ss_ep_comp_descriptor *comp_desc, 489 bool ignore, bool restore) 490 { 491 struct dwc3 *dwc = dep->dwc; 492 u32 reg; 493 int ret; 494 495 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name); 496 497 if (!(dep->flags & DWC3_EP_ENABLED)) { 498 ret = dwc3_gadget_start_config(dwc, dep); 499 if (ret) 500 return ret; 501 } 502 503 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 504 restore); 505 if (ret) 506 return ret; 507 508 if (!(dep->flags & DWC3_EP_ENABLED)) { 509 struct dwc3_trb *trb_st_hw; 510 struct dwc3_trb *trb_link; 511 512 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 513 if (ret) 514 return ret; 515 516 dep->endpoint.desc = desc; 517 dep->comp_desc = comp_desc; 518 dep->type = usb_endpoint_type(desc); 519 dep->flags |= DWC3_EP_ENABLED; 520 521 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 522 reg 
|= DWC3_DALEPENA_EP(dep->number); 523 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 524 525 if (!usb_endpoint_xfer_isoc(desc)) 526 return 0; 527 528 /* Link TRB for ISOC. The HWO bit is never reset */ 529 trb_st_hw = &dep->trb_pool[0]; 530 531 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 532 memset(trb_link, 0, sizeof(*trb_link)); 533 534 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 535 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 536 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 537 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 538 } 539 540 return 0; 541 } 542 543 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 544 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 545 { 546 struct dwc3_request *req; 547 548 if (!list_empty(&dep->req_queued)) { 549 dwc3_stop_active_transfer(dwc, dep->number, true); 550 551 /* - giveback all requests to gadget driver */ 552 while (!list_empty(&dep->req_queued)) { 553 req = next_request(&dep->req_queued); 554 555 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 556 } 557 } 558 559 while (!list_empty(&dep->request_list)) { 560 req = next_request(&dep->request_list); 561 562 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 563 } 564 } 565 566 /** 567 * __dwc3_gadget_ep_disable - Disables a HW endpoint 568 * @dep: the endpoint to disable 569 * 570 * This function also removes requests which are currently processed ny the 571 * hardware and those which are not yet scheduled. 572 * Caller should take care of locking. 
573 */ 574 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 575 { 576 struct dwc3 *dwc = dep->dwc; 577 u32 reg; 578 579 dwc3_remove_requests(dwc, dep); 580 581 /* make sure HW endpoint isn't stalled */ 582 if (dep->flags & DWC3_EP_STALL) 583 __dwc3_gadget_ep_set_halt(dep, 0, false); 584 585 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 586 reg &= ~DWC3_DALEPENA_EP(dep->number); 587 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 588 589 dep->stream_capable = false; 590 dep->endpoint.desc = NULL; 591 dep->comp_desc = NULL; 592 dep->type = 0; 593 dep->flags = 0; 594 595 return 0; 596 } 597 598 /* -------------------------------------------------------------------------- */ 599 600 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 601 const struct usb_endpoint_descriptor *desc) 602 { 603 return -EINVAL; 604 } 605 606 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 607 { 608 return -EINVAL; 609 } 610 611 /* -------------------------------------------------------------------------- */ 612 613 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 614 const struct usb_endpoint_descriptor *desc) 615 { 616 struct dwc3_ep *dep; 617 struct dwc3 *dwc; 618 unsigned long flags; 619 int ret; 620 621 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 622 pr_debug("dwc3: invalid parameters\n"); 623 return -EINVAL; 624 } 625 626 if (!desc->wMaxPacketSize) { 627 pr_debug("dwc3: missing wMaxPacketSize\n"); 628 return -EINVAL; 629 } 630 631 dep = to_dwc3_ep(ep); 632 dwc = dep->dwc; 633 634 if (dep->flags & DWC3_EP_ENABLED) { 635 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", 636 dep->name); 637 return 0; 638 } 639 640 switch (usb_endpoint_type(desc)) { 641 case USB_ENDPOINT_XFER_CONTROL: 642 strlcat(dep->name, "-control", sizeof(dep->name)); 643 break; 644 case USB_ENDPOINT_XFER_ISOC: 645 strlcat(dep->name, "-isoc", sizeof(dep->name)); 646 break; 647 case USB_ENDPOINT_XFER_BULK: 648 strlcat(dep->name, "-bulk", sizeof(dep->name)); 649 break; 650 case 
USB_ENDPOINT_XFER_INT: 651 strlcat(dep->name, "-int", sizeof(dep->name)); 652 break; 653 default: 654 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 655 } 656 657 spin_lock_irqsave(&dwc->lock, flags); 658 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 659 spin_unlock_irqrestore(&dwc->lock, flags); 660 661 return ret; 662 } 663 664 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 665 { 666 struct dwc3_ep *dep; 667 struct dwc3 *dwc; 668 unsigned long flags; 669 int ret; 670 671 if (!ep) { 672 pr_debug("dwc3: invalid parameters\n"); 673 return -EINVAL; 674 } 675 676 dep = to_dwc3_ep(ep); 677 dwc = dep->dwc; 678 679 if (!(dep->flags & DWC3_EP_ENABLED)) { 680 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 681 dep->name); 682 return 0; 683 } 684 685 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 686 dep->number >> 1, 687 (dep->number & 1) ? "in" : "out"); 688 689 spin_lock_irqsave(&dwc->lock, flags); 690 ret = __dwc3_gadget_ep_disable(dep); 691 spin_unlock_irqrestore(&dwc->lock, flags); 692 693 return ret; 694 } 695 696 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 697 gfp_t gfp_flags) 698 { 699 struct dwc3_request *req; 700 struct dwc3_ep *dep = to_dwc3_ep(ep); 701 702 req = kzalloc(sizeof(*req), gfp_flags); 703 if (!req) 704 return NULL; 705 706 req->epnum = dep->number; 707 req->dep = dep; 708 709 trace_dwc3_alloc_request(req); 710 711 return &req->request; 712 } 713 714 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 715 struct usb_request *request) 716 { 717 struct dwc3_request *req = to_dwc3_request(request); 718 719 trace_dwc3_free_request(req); 720 kfree(req); 721 } 722 723 /** 724 * dwc3_prepare_one_trb - setup one TRB from one request 725 * @dep: endpoint for which this request is prepared 726 * @req: dwc3_request pointer 727 */ 728 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 729 struct dwc3_request *req, dma_addr_t dma, 730 unsigned length, unsigned last, 
unsigned chain, unsigned node) 731 { 732 struct dwc3 *dwc = dep->dwc; 733 struct dwc3_trb *trb; 734 735 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", 736 dep->name, req, (unsigned long long) dma, 737 length, last ? " last" : "", 738 chain ? " chain" : ""); 739 740 741 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 742 743 if (!req->trb) { 744 dwc3_gadget_move_request_queued(req); 745 req->trb = trb; 746 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 747 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 748 } 749 750 dep->free_slot++; 751 /* Skip the LINK-TRB on ISOC */ 752 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 753 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 754 dep->free_slot++; 755 756 trb->size = DWC3_TRB_SIZE_LENGTH(length); 757 trb->bpl = lower_32_bits(dma); 758 trb->bph = upper_32_bits(dma); 759 760 switch (usb_endpoint_type(dep->endpoint.desc)) { 761 case USB_ENDPOINT_XFER_CONTROL: 762 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 763 break; 764 765 case USB_ENDPOINT_XFER_ISOC: 766 if (!node) 767 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 768 else 769 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 770 break; 771 772 case USB_ENDPOINT_XFER_BULK: 773 case USB_ENDPOINT_XFER_INT: 774 trb->ctrl = DWC3_TRBCTL_NORMAL; 775 break; 776 default: 777 /* 778 * This is only possible with faulty memory because we 779 * checked it already :) 780 */ 781 BUG(); 782 } 783 784 if (!req->request.no_interrupt && !chain) 785 trb->ctrl |= DWC3_TRB_CTRL_IOC; 786 787 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 788 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 789 trb->ctrl |= DWC3_TRB_CTRL_CSP; 790 } else if (last) { 791 trb->ctrl |= DWC3_TRB_CTRL_LST; 792 } 793 794 if (chain) 795 trb->ctrl |= DWC3_TRB_CTRL_CHN; 796 797 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 798 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 799 800 trb->ctrl |= DWC3_TRB_CTRL_HWO; 801 802 trace_dwc3_prepare_trb(dep, trb); 803 } 804 805 /* 806 * 
dwc3_prepare_trbs - setup TRBs from requests 807 * @dep: endpoint for which requests are being prepared 808 * @starting: true if the endpoint is idle and no requests are queued. 809 * 810 * The function goes through the requests list and sets up TRBs for the 811 * transfers. The function returns once there are no more TRBs available or 812 * it runs out of requests. 813 */ 814 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 815 { 816 struct dwc3_request *req, *n; 817 u32 trbs_left; 818 u32 max; 819 unsigned int last_one = 0; 820 821 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 822 823 /* the first request must not be queued */ 824 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 825 826 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 827 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 828 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 829 if (trbs_left > max) 830 trbs_left = max; 831 } 832 833 /* 834 * If busy & slot are equal than it is either full or empty. If we are 835 * starting to process requests then we are empty. Otherwise we are 836 * full and don't do anything 837 */ 838 if (!trbs_left) { 839 if (!starting) 840 return; 841 trbs_left = DWC3_TRB_NUM; 842 /* 843 * In case we start from scratch, we queue the ISOC requests 844 * starting from slot 1. This is done because we use ring 845 * buffer and have no LST bit to stop us. Instead, we place 846 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 847 * after the first request so we start at slot 1 and have 848 * 7 requests proceed before we hit the first IOC. 849 * Other transfer types don't use the ring buffer and are 850 * processed from the first TRB until the last one. Since we 851 * don't wrap around we have to start at the beginning. 
852 */ 853 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 854 dep->busy_slot = 1; 855 dep->free_slot = 1; 856 } else { 857 dep->busy_slot = 0; 858 dep->free_slot = 0; 859 } 860 } 861 862 /* The last TRB is a link TRB, not used for xfer */ 863 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 864 return; 865 866 list_for_each_entry_safe(req, n, &dep->request_list, list) { 867 unsigned length; 868 dma_addr_t dma; 869 last_one = false; 870 871 if (req->request.num_mapped_sgs > 0) { 872 struct usb_request *request = &req->request; 873 struct scatterlist *sg = request->sg; 874 struct scatterlist *s; 875 int i; 876 877 for_each_sg(sg, s, request->num_mapped_sgs, i) { 878 unsigned chain = true; 879 880 length = sg_dma_len(s); 881 dma = sg_dma_address(s); 882 883 if (i == (request->num_mapped_sgs - 1) || 884 sg_is_last(s)) { 885 if (list_empty(&dep->request_list)) 886 last_one = true; 887 chain = false; 888 } 889 890 trbs_left--; 891 if (!trbs_left) 892 last_one = true; 893 894 if (last_one) 895 chain = false; 896 897 dwc3_prepare_one_trb(dep, req, dma, length, 898 last_one, chain, i); 899 900 if (last_one) 901 break; 902 } 903 904 if (last_one) 905 break; 906 } else { 907 dma = req->request.dma; 908 length = req->request.length; 909 trbs_left--; 910 911 if (!trbs_left) 912 last_one = 1; 913 914 /* Is this the last request? 
*/ 915 if (list_is_last(&req->list, &dep->request_list)) 916 last_one = 1; 917 918 dwc3_prepare_one_trb(dep, req, dma, length, 919 last_one, false, 0); 920 921 if (last_one) 922 break; 923 } 924 } 925 } 926 927 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 928 int start_new) 929 { 930 struct dwc3_gadget_ep_cmd_params params; 931 struct dwc3_request *req; 932 struct dwc3 *dwc = dep->dwc; 933 int ret; 934 u32 cmd; 935 936 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 937 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); 938 return -EBUSY; 939 } 940 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 941 942 /* 943 * If we are getting here after a short-out-packet we don't enqueue any 944 * new requests as we try to set the IOC bit only on the last request. 945 */ 946 if (start_new) { 947 if (list_empty(&dep->req_queued)) 948 dwc3_prepare_trbs(dep, start_new); 949 950 /* req points to the first request which will be sent */ 951 req = next_request(&dep->req_queued); 952 } else { 953 dwc3_prepare_trbs(dep, start_new); 954 955 /* 956 * req points to the first request where HWO changed from 0 to 1 957 */ 958 req = next_request(&dep->req_queued); 959 } 960 if (!req) { 961 dep->flags |= DWC3_EP_PENDING_REQUEST; 962 return 0; 963 } 964 965 memset(¶ms, 0, sizeof(params)); 966 967 if (start_new) { 968 params.param0 = upper_32_bits(req->trb_dma); 969 params.param1 = lower_32_bits(req->trb_dma); 970 cmd = DWC3_DEPCMD_STARTTRANSFER; 971 } else { 972 cmd = DWC3_DEPCMD_UPDATETRANSFER; 973 } 974 975 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 976 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 977 if (ret < 0) { 978 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 979 980 /* 981 * FIXME we need to iterate over the list of requests 982 * here and stop, unmap, free and del each of the linked 983 * requests instead of what we do now. 
984 */ 985 usb_gadget_unmap_request(&dwc->gadget, &req->request, 986 req->direction); 987 list_del(&req->list); 988 return ret; 989 } 990 991 dep->flags |= DWC3_EP_BUSY; 992 993 if (start_new) { 994 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 995 dep->number); 996 WARN_ON_ONCE(!dep->resource_index); 997 } 998 999 return 0; 1000 } 1001 1002 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1003 struct dwc3_ep *dep, u32 cur_uf) 1004 { 1005 u32 uf; 1006 1007 if (list_empty(&dep->request_list)) { 1008 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", 1009 dep->name); 1010 dep->flags |= DWC3_EP_PENDING_REQUEST; 1011 return; 1012 } 1013 1014 /* 4 micro frames in the future */ 1015 uf = cur_uf + dep->interval * 4; 1016 1017 __dwc3_gadget_kick_transfer(dep, uf, 1); 1018 } 1019 1020 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1021 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1022 { 1023 u32 cur_uf, mask; 1024 1025 mask = ~(dep->interval - 1); 1026 cur_uf = event->parameters & mask; 1027 1028 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1029 } 1030 1031 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1032 { 1033 struct dwc3 *dwc = dep->dwc; 1034 int ret; 1035 1036 req->request.actual = 0; 1037 req->request.status = -EINPROGRESS; 1038 req->direction = dep->direction; 1039 req->epnum = dep->number; 1040 1041 /* 1042 * We only add to our list of requests now and 1043 * start consuming the list once we get XferNotReady 1044 * IRQ. 1045 * 1046 * That way, we avoid doing anything that we don't need 1047 * to do now and defer it until the point we receive a 1048 * particular token from the Host side. 1049 * 1050 * This will also avoid Host cancelling URBs due to too 1051 * many NAKs. 
1052 */ 1053 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1054 dep->direction); 1055 if (ret) 1056 return ret; 1057 1058 list_add_tail(&req->list, &dep->request_list); 1059 1060 /* 1061 * There are a few special cases: 1062 * 1063 * 1. XferNotReady with empty list of requests. We need to kick the 1064 * transfer here in that situation, otherwise we will be NAKing 1065 * forever. If we get XferNotReady before gadget driver has a 1066 * chance to queue a request, we will ACK the IRQ but won't be 1067 * able to receive the data until the next request is queued. 1068 * The following code is handling exactly that. 1069 * 1070 */ 1071 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1072 /* 1073 * If xfernotready is already elapsed and it is a case 1074 * of isoc transfer, then issue END TRANSFER, so that 1075 * you can receive xfernotready again and can have 1076 * notion of current microframe. 1077 */ 1078 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1079 if (list_empty(&dep->req_queued)) { 1080 dwc3_stop_active_transfer(dwc, dep->number, true); 1081 dep->flags = DWC3_EP_ENABLED; 1082 } 1083 return 0; 1084 } 1085 1086 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1087 if (ret && ret != -EBUSY) 1088 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1089 dep->name); 1090 return ret; 1091 } 1092 1093 /* 1094 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1095 * kick the transfer here after queuing a request, otherwise the 1096 * core may not see the modified TRB(s). 1097 */ 1098 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1099 (dep->flags & DWC3_EP_BUSY) && 1100 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1101 WARN_ON_ONCE(!dep->resource_index); 1102 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1103 false); 1104 if (ret && ret != -EBUSY) 1105 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1106 dep->name); 1107 return ret; 1108 } 1109 1110 /* 1111 * 4. Stream Capable Bulk Endpoints. 
We need to start the transfer 1112 * right away, otherwise host will not know we have streams to be 1113 * handled. 1114 */ 1115 if (dep->stream_capable) { 1116 int ret; 1117 1118 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1119 if (ret && ret != -EBUSY) { 1120 struct dwc3 *dwc = dep->dwc; 1121 1122 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1123 dep->name); 1124 } 1125 } 1126 1127 return 0; 1128 } 1129 1130 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1131 gfp_t gfp_flags) 1132 { 1133 struct dwc3_request *req = to_dwc3_request(request); 1134 struct dwc3_ep *dep = to_dwc3_ep(ep); 1135 struct dwc3 *dwc = dep->dwc; 1136 1137 unsigned long flags; 1138 1139 int ret; 1140 1141 spin_lock_irqsave(&dwc->lock, flags); 1142 if (!dep->endpoint.desc) { 1143 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 1144 request, ep->name); 1145 ret = -ESHUTDOWN; 1146 goto out; 1147 } 1148 1149 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1150 request, req->dep->name)) { 1151 ret = -EINVAL; 1152 goto out; 1153 } 1154 1155 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", 1156 request, ep->name, request->length); 1157 trace_dwc3_ep_queue(req); 1158 1159 ret = __dwc3_gadget_ep_queue(dep, req); 1160 1161 out: 1162 spin_unlock_irqrestore(&dwc->lock, flags); 1163 1164 return ret; 1165 } 1166 1167 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1168 struct usb_request *request) 1169 { 1170 struct dwc3_request *req = to_dwc3_request(request); 1171 struct dwc3_request *r = NULL; 1172 1173 struct dwc3_ep *dep = to_dwc3_ep(ep); 1174 struct dwc3 *dwc = dep->dwc; 1175 1176 unsigned long flags; 1177 int ret = 0; 1178 1179 trace_dwc3_ep_dequeue(req); 1180 1181 spin_lock_irqsave(&dwc->lock, flags); 1182 1183 list_for_each_entry(r, &dep->request_list, list) { 1184 if (r == req) 1185 break; 1186 } 1187 1188 if (r != req) { 1189 list_for_each_entry(r, &dep->req_queued, list) { 1190 if (r == req) 1191 break; 1192 } 
1193 if (r == req) { 1194 /* wait until it is processed */ 1195 dwc3_stop_active_transfer(dwc, dep->number, true); 1196 goto out1; 1197 } 1198 dev_err(dwc->dev, "request %p was not queued to %s\n", 1199 request, ep->name); 1200 ret = -EINVAL; 1201 goto out0; 1202 } 1203 1204 out1: 1205 /* giveback the request */ 1206 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1207 1208 out0: 1209 spin_unlock_irqrestore(&dwc->lock, flags); 1210 1211 return ret; 1212 } 1213 1214 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1215 { 1216 struct dwc3_gadget_ep_cmd_params params; 1217 struct dwc3 *dwc = dep->dwc; 1218 int ret; 1219 1220 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1221 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1222 return -EINVAL; 1223 } 1224 1225 memset(¶ms, 0x00, sizeof(params)); 1226 1227 if (value) { 1228 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1229 (!list_empty(&dep->req_queued) || 1230 !list_empty(&dep->request_list)))) { 1231 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", 1232 dep->name); 1233 return -EAGAIN; 1234 } 1235 1236 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1237 DWC3_DEPCMD_SETSTALL, ¶ms); 1238 if (ret) 1239 dev_err(dwc->dev, "failed to set STALL on %s\n", 1240 dep->name); 1241 else 1242 dep->flags |= DWC3_EP_STALL; 1243 } else { 1244 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1245 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1246 if (ret) 1247 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1248 dep->name); 1249 else 1250 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1251 } 1252 1253 return ret; 1254 } 1255 1256 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1257 { 1258 struct dwc3_ep *dep = to_dwc3_ep(ep); 1259 struct dwc3 *dwc = dep->dwc; 1260 1261 unsigned long flags; 1262 1263 int ret; 1264 1265 spin_lock_irqsave(&dwc->lock, flags); 1266 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1267 spin_unlock_irqrestore(&dwc->lock, flags); 1268 
1269 return ret; 1270 } 1271 1272 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1273 { 1274 struct dwc3_ep *dep = to_dwc3_ep(ep); 1275 struct dwc3 *dwc = dep->dwc; 1276 unsigned long flags; 1277 int ret; 1278 1279 spin_lock_irqsave(&dwc->lock, flags); 1280 dep->flags |= DWC3_EP_WEDGE; 1281 1282 if (dep->number == 0 || dep->number == 1) 1283 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1284 else 1285 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1286 spin_unlock_irqrestore(&dwc->lock, flags); 1287 1288 return ret; 1289 } 1290 1291 /* -------------------------------------------------------------------------- */ 1292 1293 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1294 .bLength = USB_DT_ENDPOINT_SIZE, 1295 .bDescriptorType = USB_DT_ENDPOINT, 1296 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1297 }; 1298 1299 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1300 .enable = dwc3_gadget_ep0_enable, 1301 .disable = dwc3_gadget_ep0_disable, 1302 .alloc_request = dwc3_gadget_ep_alloc_request, 1303 .free_request = dwc3_gadget_ep_free_request, 1304 .queue = dwc3_gadget_ep0_queue, 1305 .dequeue = dwc3_gadget_ep_dequeue, 1306 .set_halt = dwc3_gadget_ep0_set_halt, 1307 .set_wedge = dwc3_gadget_ep_set_wedge, 1308 }; 1309 1310 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1311 .enable = dwc3_gadget_ep_enable, 1312 .disable = dwc3_gadget_ep_disable, 1313 .alloc_request = dwc3_gadget_ep_alloc_request, 1314 .free_request = dwc3_gadget_ep_free_request, 1315 .queue = dwc3_gadget_ep_queue, 1316 .dequeue = dwc3_gadget_ep_dequeue, 1317 .set_halt = dwc3_gadget_ep_set_halt, 1318 .set_wedge = dwc3_gadget_ep_set_wedge, 1319 }; 1320 1321 /* -------------------------------------------------------------------------- */ 1322 1323 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1324 { 1325 struct dwc3 *dwc = gadget_to_dwc(g); 1326 u32 reg; 1327 1328 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1329 return DWC3_DSTS_SOFFN(reg); 1330 } 1331 1332 static int 
dwc3_gadget_wakeup(struct usb_gadget *g) 1333 { 1334 struct dwc3 *dwc = gadget_to_dwc(g); 1335 1336 unsigned long timeout; 1337 unsigned long flags; 1338 1339 u32 reg; 1340 1341 int ret = 0; 1342 1343 u8 link_state; 1344 u8 speed; 1345 1346 spin_lock_irqsave(&dwc->lock, flags); 1347 1348 /* 1349 * According to the Databook Remote wakeup request should 1350 * be issued only when the device is in early suspend state. 1351 * 1352 * We can check that via USB Link State bits in DSTS register. 1353 */ 1354 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1355 1356 speed = reg & DWC3_DSTS_CONNECTSPD; 1357 if (speed == DWC3_DSTS_SUPERSPEED) { 1358 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1359 ret = -EINVAL; 1360 goto out; 1361 } 1362 1363 link_state = DWC3_DSTS_USBLNKST(reg); 1364 1365 switch (link_state) { 1366 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1367 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1368 break; 1369 default: 1370 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1371 link_state); 1372 ret = -EINVAL; 1373 goto out; 1374 } 1375 1376 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1377 if (ret < 0) { 1378 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1379 goto out; 1380 } 1381 1382 /* Recent versions do this automatically */ 1383 if (dwc->revision < DWC3_REVISION_194A) { 1384 /* write zeroes to Link Change Request */ 1385 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1386 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1387 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1388 } 1389 1390 /* poll until Link State changes to ON */ 1391 timeout = jiffies + msecs_to_jiffies(100); 1392 1393 while (!time_after(jiffies, timeout)) { 1394 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1395 1396 /* in HS, means ON */ 1397 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1398 break; 1399 } 1400 1401 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1402 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1403 ret = -EINVAL; 1404 } 1405 1406 
out: 1407 spin_unlock_irqrestore(&dwc->lock, flags); 1408 1409 return ret; 1410 } 1411 1412 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1413 int is_selfpowered) 1414 { 1415 struct dwc3 *dwc = gadget_to_dwc(g); 1416 unsigned long flags; 1417 1418 spin_lock_irqsave(&dwc->lock, flags); 1419 dwc->is_selfpowered = !!is_selfpowered; 1420 spin_unlock_irqrestore(&dwc->lock, flags); 1421 1422 return 0; 1423 } 1424 1425 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1426 { 1427 u32 reg; 1428 u32 timeout = 500; 1429 1430 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1431 if (is_on) { 1432 if (dwc->revision <= DWC3_REVISION_187A) { 1433 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1434 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1435 } 1436 1437 if (dwc->revision >= DWC3_REVISION_194A) 1438 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1439 reg |= DWC3_DCTL_RUN_STOP; 1440 1441 if (dwc->has_hibernation) 1442 reg |= DWC3_DCTL_KEEP_CONNECT; 1443 1444 dwc->pullups_connected = true; 1445 } else { 1446 reg &= ~DWC3_DCTL_RUN_STOP; 1447 1448 if (dwc->has_hibernation && !suspend) 1449 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1450 1451 dwc->pullups_connected = false; 1452 } 1453 1454 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1455 1456 do { 1457 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1458 if (is_on) { 1459 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1460 break; 1461 } else { 1462 if (reg & DWC3_DSTS_DEVCTRLHLT) 1463 break; 1464 } 1465 timeout--; 1466 if (!timeout) 1467 return -ETIMEDOUT; 1468 udelay(1); 1469 } while (1); 1470 1471 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", 1472 dwc->gadget_driver 1473 ? dwc->gadget_driver->function : "no-function", 1474 is_on ? 
"connect" : "disconnect"); 1475 1476 return 0; 1477 } 1478 1479 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1480 { 1481 struct dwc3 *dwc = gadget_to_dwc(g); 1482 unsigned long flags; 1483 int ret; 1484 1485 is_on = !!is_on; 1486 1487 spin_lock_irqsave(&dwc->lock, flags); 1488 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1489 spin_unlock_irqrestore(&dwc->lock, flags); 1490 1491 return ret; 1492 } 1493 1494 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1495 { 1496 u32 reg; 1497 1498 /* Enable all but Start and End of Frame IRQs */ 1499 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1500 DWC3_DEVTEN_EVNTOVERFLOWEN | 1501 DWC3_DEVTEN_CMDCMPLTEN | 1502 DWC3_DEVTEN_ERRTICERREN | 1503 DWC3_DEVTEN_WKUPEVTEN | 1504 DWC3_DEVTEN_ULSTCNGEN | 1505 DWC3_DEVTEN_CONNECTDONEEN | 1506 DWC3_DEVTEN_USBRSTEN | 1507 DWC3_DEVTEN_DISCONNEVTEN); 1508 1509 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1510 } 1511 1512 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1513 { 1514 /* mask all interrupts */ 1515 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1516 } 1517 1518 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1519 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1520 1521 static int dwc3_gadget_start(struct usb_gadget *g, 1522 struct usb_gadget_driver *driver) 1523 { 1524 struct dwc3 *dwc = gadget_to_dwc(g); 1525 struct dwc3_ep *dep; 1526 unsigned long flags; 1527 int ret = 0; 1528 int irq; 1529 u32 reg; 1530 1531 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1532 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1533 IRQF_SHARED, "dwc3", dwc); 1534 if (ret) { 1535 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1536 irq, ret); 1537 goto err0; 1538 } 1539 1540 spin_lock_irqsave(&dwc->lock, flags); 1541 1542 if (dwc->gadget_driver) { 1543 dev_err(dwc->dev, "%s is already bound to %s\n", 1544 dwc->gadget.name, 1545 dwc->gadget_driver->driver.name); 1546 ret = -EBUSY; 1547 goto err1; 1548 } 1549 1550 
dwc->gadget_driver = driver; 1551 1552 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1553 reg &= ~(DWC3_DCFG_SPEED_MASK); 1554 1555 /** 1556 * WORKAROUND: DWC3 revision < 2.20a have an issue 1557 * which would cause metastability state on Run/Stop 1558 * bit if we try to force the IP to USB2-only mode. 1559 * 1560 * Because of that, we cannot configure the IP to any 1561 * speed other than the SuperSpeed 1562 * 1563 * Refers to: 1564 * 1565 * STAR#9000525659: Clock Domain Crossing on DCTL in 1566 * USB 2.0 Mode 1567 */ 1568 if (dwc->revision < DWC3_REVISION_220A) { 1569 reg |= DWC3_DCFG_SUPERSPEED; 1570 } else { 1571 switch (dwc->maximum_speed) { 1572 case USB_SPEED_LOW: 1573 reg |= DWC3_DSTS_LOWSPEED; 1574 break; 1575 case USB_SPEED_FULL: 1576 reg |= DWC3_DSTS_FULLSPEED1; 1577 break; 1578 case USB_SPEED_HIGH: 1579 reg |= DWC3_DSTS_HIGHSPEED; 1580 break; 1581 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1582 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1583 default: 1584 reg |= DWC3_DSTS_SUPERSPEED; 1585 } 1586 } 1587 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1588 1589 dwc->start_config_issued = false; 1590 1591 /* Start with SuperSpeed Default */ 1592 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1593 1594 dep = dwc->eps[0]; 1595 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1596 false); 1597 if (ret) { 1598 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1599 goto err2; 1600 } 1601 1602 dep = dwc->eps[1]; 1603 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1604 false); 1605 if (ret) { 1606 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1607 goto err3; 1608 } 1609 1610 /* begin to receive SETUP packets */ 1611 dwc->ep0state = EP0_SETUP_PHASE; 1612 dwc3_ep0_out_start(dwc); 1613 1614 dwc3_gadget_enable_irq(dwc); 1615 1616 spin_unlock_irqrestore(&dwc->lock, flags); 1617 1618 return 0; 1619 1620 err3: 1621 __dwc3_gadget_ep_disable(dwc->eps[0]); 1622 1623 err2: 1624 dwc->gadget_driver = NULL; 1625 1626 err1: 1627 
spin_unlock_irqrestore(&dwc->lock, flags); 1628 1629 free_irq(irq, dwc); 1630 1631 err0: 1632 return ret; 1633 } 1634 1635 static int dwc3_gadget_stop(struct usb_gadget *g) 1636 { 1637 struct dwc3 *dwc = gadget_to_dwc(g); 1638 unsigned long flags; 1639 int irq; 1640 1641 spin_lock_irqsave(&dwc->lock, flags); 1642 1643 dwc3_gadget_disable_irq(dwc); 1644 __dwc3_gadget_ep_disable(dwc->eps[0]); 1645 __dwc3_gadget_ep_disable(dwc->eps[1]); 1646 1647 dwc->gadget_driver = NULL; 1648 1649 spin_unlock_irqrestore(&dwc->lock, flags); 1650 1651 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1652 free_irq(irq, dwc); 1653 1654 return 0; 1655 } 1656 1657 static const struct usb_gadget_ops dwc3_gadget_ops = { 1658 .get_frame = dwc3_gadget_get_frame, 1659 .wakeup = dwc3_gadget_wakeup, 1660 .set_selfpowered = dwc3_gadget_set_selfpowered, 1661 .pullup = dwc3_gadget_pullup, 1662 .udc_start = dwc3_gadget_start, 1663 .udc_stop = dwc3_gadget_stop, 1664 }; 1665 1666 /* -------------------------------------------------------------------------- */ 1667 1668 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1669 u8 num, u32 direction) 1670 { 1671 struct dwc3_ep *dep; 1672 u8 i; 1673 1674 for (i = 0; i < num; i++) { 1675 u8 epnum = (i << 1) | (!!direction); 1676 1677 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1678 if (!dep) 1679 return -ENOMEM; 1680 1681 dep->dwc = dwc; 1682 dep->number = epnum; 1683 dep->direction = !!direction; 1684 dwc->eps[epnum] = dep; 1685 1686 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1687 (epnum & 1) ? 
"in" : "out"); 1688 1689 dep->endpoint.name = dep->name; 1690 1691 dev_vdbg(dwc->dev, "initializing %s\n", dep->name); 1692 1693 if (epnum == 0 || epnum == 1) { 1694 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1695 dep->endpoint.maxburst = 1; 1696 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1697 if (!epnum) 1698 dwc->gadget.ep0 = &dep->endpoint; 1699 } else { 1700 int ret; 1701 1702 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1703 dep->endpoint.max_streams = 15; 1704 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1705 list_add_tail(&dep->endpoint.ep_list, 1706 &dwc->gadget.ep_list); 1707 1708 ret = dwc3_alloc_trb_pool(dep); 1709 if (ret) 1710 return ret; 1711 } 1712 1713 INIT_LIST_HEAD(&dep->request_list); 1714 INIT_LIST_HEAD(&dep->req_queued); 1715 } 1716 1717 return 0; 1718 } 1719 1720 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1721 { 1722 int ret; 1723 1724 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1725 1726 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1727 if (ret < 0) { 1728 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n"); 1729 return ret; 1730 } 1731 1732 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1733 if (ret < 0) { 1734 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n"); 1735 return ret; 1736 } 1737 1738 return 0; 1739 } 1740 1741 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1742 { 1743 struct dwc3_ep *dep; 1744 u8 epnum; 1745 1746 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1747 dep = dwc->eps[epnum]; 1748 if (!dep) 1749 continue; 1750 /* 1751 * Physical endpoints 0 and 1 are special; they form the 1752 * bi-directional USB endpoint 0. 1753 * 1754 * For those two physical endpoints, we don't allocate a TRB 1755 * pool nor do we add them the endpoints list. Due to that, we 1756 * shouldn't do these two operations otherwise we would end up 1757 * with all sorts of bugs when removing dwc3.ko. 
1758 */ 1759 if (epnum != 0 && epnum != 1) { 1760 dwc3_free_trb_pool(dep); 1761 list_del(&dep->endpoint.ep_list); 1762 } 1763 1764 kfree(dep); 1765 } 1766 } 1767 1768 /* -------------------------------------------------------------------------- */ 1769 1770 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1771 struct dwc3_request *req, struct dwc3_trb *trb, 1772 const struct dwc3_event_depevt *event, int status) 1773 { 1774 unsigned int count; 1775 unsigned int s_pkt = 0; 1776 unsigned int trb_status; 1777 1778 trace_dwc3_complete_trb(dep, trb); 1779 1780 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1781 /* 1782 * We continue despite the error. There is not much we 1783 * can do. If we don't clean it up we loop forever. If 1784 * we skip the TRB then it gets overwritten after a 1785 * while since we use them in a ring buffer. A BUG() 1786 * would help. Lets hope that if this occurs, someone 1787 * fixes the root cause instead of looking away :) 1788 */ 1789 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1790 dep->name, trb); 1791 count = trb->size & DWC3_TRB_SIZE_MASK; 1792 1793 if (dep->direction) { 1794 if (count) { 1795 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1796 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1797 dev_dbg(dwc->dev, "incomplete IN transfer %s\n", 1798 dep->name); 1799 /* 1800 * If missed isoc occurred and there is 1801 * no request queued then issue END 1802 * TRANSFER, so that core generates 1803 * next xfernotready and we will issue 1804 * a fresh START TRANSFER. 1805 * If there are still queued request 1806 * then wait, do not issue either END 1807 * or UPDATE TRANSFER, just attach next 1808 * request in request_list during 1809 * giveback.If any future queued request 1810 * is successfully transferred then we 1811 * will issue UPDATE TRANSFER for all 1812 * request in the request_list. 
1813 */ 1814 dep->flags |= DWC3_EP_MISSED_ISOC; 1815 } else { 1816 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1817 dep->name); 1818 status = -ECONNRESET; 1819 } 1820 } else { 1821 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1822 } 1823 } else { 1824 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1825 s_pkt = 1; 1826 } 1827 1828 /* 1829 * We assume here we will always receive the entire data block 1830 * which we should receive. Meaning, if we program RX to 1831 * receive 4K but we receive only 2K, we assume that's all we 1832 * should receive and we simply bounce the request back to the 1833 * gadget driver for further processing. 1834 */ 1835 req->request.actual += req->request.length - count; 1836 if (s_pkt) 1837 return 1; 1838 if ((event->status & DEPEVT_STATUS_LST) && 1839 (trb->ctrl & (DWC3_TRB_CTRL_LST | 1840 DWC3_TRB_CTRL_HWO))) 1841 return 1; 1842 if ((event->status & DEPEVT_STATUS_IOC) && 1843 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1844 return 1; 1845 return 0; 1846 } 1847 1848 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1849 const struct dwc3_event_depevt *event, int status) 1850 { 1851 struct dwc3_request *req; 1852 struct dwc3_trb *trb; 1853 unsigned int slot; 1854 unsigned int i; 1855 int ret; 1856 1857 do { 1858 req = next_request(&dep->req_queued); 1859 if (!req) { 1860 WARN_ON_ONCE(1); 1861 return 1; 1862 } 1863 i = 0; 1864 do { 1865 slot = req->start_slot + i; 1866 if ((slot == DWC3_TRB_NUM - 1) && 1867 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1868 slot++; 1869 slot %= DWC3_TRB_NUM; 1870 trb = &dep->trb_pool[slot]; 1871 1872 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1873 event, status); 1874 if (ret) 1875 break; 1876 }while (++i < req->request.num_mapped_sgs); 1877 1878 dwc3_gadget_giveback(dep, req, status); 1879 1880 if (ret) 1881 break; 1882 } while (1); 1883 1884 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1885 list_empty(&dep->req_queued)) { 1886 if (list_empty(&dep->request_list)) { 1887 /* 1888 * 
If there is no entry in request list then do 1889 * not issue END TRANSFER now. Just set PENDING 1890 * flag, so that END TRANSFER is issued when an 1891 * entry is added into request list. 1892 */ 1893 dep->flags = DWC3_EP_PENDING_REQUEST; 1894 } else { 1895 dwc3_stop_active_transfer(dwc, dep->number, true); 1896 dep->flags = DWC3_EP_ENABLED; 1897 } 1898 return 1; 1899 } 1900 1901 return 1; 1902 } 1903 1904 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1905 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1906 { 1907 unsigned status = 0; 1908 int clean_busy; 1909 1910 if (event->status & DEPEVT_STATUS_BUSERR) 1911 status = -ECONNRESET; 1912 1913 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1914 if (clean_busy) 1915 dep->flags &= ~DWC3_EP_BUSY; 1916 1917 /* 1918 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1919 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 1920 */ 1921 if (dwc->revision < DWC3_REVISION_183A) { 1922 u32 reg; 1923 int i; 1924 1925 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1926 dep = dwc->eps[i]; 1927 1928 if (!(dep->flags & DWC3_EP_ENABLED)) 1929 continue; 1930 1931 if (!list_empty(&dep->req_queued)) 1932 return; 1933 } 1934 1935 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1936 reg |= dwc->u1u2; 1937 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1938 1939 dwc->u1u2 = 0; 1940 } 1941 } 1942 1943 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1944 const struct dwc3_event_depevt *event) 1945 { 1946 struct dwc3_ep *dep; 1947 u8 epnum = event->endpoint_number; 1948 1949 dep = dwc->eps[epnum]; 1950 1951 if (!(dep->flags & DWC3_EP_ENABLED)) 1952 return; 1953 1954 if (epnum == 0 || epnum == 1) { 1955 dwc3_ep0_interrupt(dwc, event); 1956 return; 1957 } 1958 1959 switch (event->endpoint_event) { 1960 case DWC3_DEPEVT_XFERCOMPLETE: 1961 dep->resource_index = 0; 1962 1963 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1964 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1965 
dep->name); 1966 return; 1967 } 1968 1969 dwc3_endpoint_transfer_complete(dwc, dep, event); 1970 break; 1971 case DWC3_DEPEVT_XFERINPROGRESS: 1972 dwc3_endpoint_transfer_complete(dwc, dep, event); 1973 break; 1974 case DWC3_DEPEVT_XFERNOTREADY: 1975 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1976 dwc3_gadget_start_isoc(dwc, dep, event); 1977 } else { 1978 int ret; 1979 1980 dev_vdbg(dwc->dev, "%s: reason %s\n", 1981 dep->name, event->status & 1982 DEPEVT_STATUS_TRANSFER_ACTIVE 1983 ? "Transfer Active" 1984 : "Transfer Not Active"); 1985 1986 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1987 if (!ret || ret == -EBUSY) 1988 return; 1989 1990 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1991 dep->name); 1992 } 1993 1994 break; 1995 case DWC3_DEPEVT_STREAMEVT: 1996 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 1997 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1998 dep->name); 1999 return; 2000 } 2001 2002 switch (event->status) { 2003 case DEPEVT_STREAMEVT_FOUND: 2004 dev_vdbg(dwc->dev, "Stream %d found and started\n", 2005 event->parameters); 2006 2007 break; 2008 case DEPEVT_STREAMEVT_NOTFOUND: 2009 /* FALLTHROUGH */ 2010 default: 2011 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 2012 } 2013 break; 2014 case DWC3_DEPEVT_RXTXFIFOEVT: 2015 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 2016 break; 2017 case DWC3_DEPEVT_EPCMDCMPLT: 2018 dev_vdbg(dwc->dev, "Endpoint Command Complete\n"); 2019 break; 2020 } 2021 } 2022 2023 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2024 { 2025 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2026 spin_unlock(&dwc->lock); 2027 dwc->gadget_driver->disconnect(&dwc->gadget); 2028 spin_lock(&dwc->lock); 2029 } 2030 } 2031 2032 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2033 { 2034 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2035 spin_unlock(&dwc->lock); 2036 dwc->gadget_driver->suspend(&dwc->gadget); 2037 spin_lock(&dwc->lock); 2038 } 2039 } 2040 2041 static void 
dwc3_resume_gadget(struct dwc3 *dwc) 2042 { 2043 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2044 spin_unlock(&dwc->lock); 2045 dwc->gadget_driver->resume(&dwc->gadget); 2046 } 2047 } 2048 2049 static void dwc3_reset_gadget(struct dwc3 *dwc) 2050 { 2051 if (!dwc->gadget_driver) 2052 return; 2053 2054 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2055 spin_unlock(&dwc->lock); 2056 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2057 spin_lock(&dwc->lock); 2058 } 2059 } 2060 2061 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2062 { 2063 struct dwc3_ep *dep; 2064 struct dwc3_gadget_ep_cmd_params params; 2065 u32 cmd; 2066 int ret; 2067 2068 dep = dwc->eps[epnum]; 2069 2070 if (!dep->resource_index) 2071 return; 2072 2073 /* 2074 * NOTICE: We are violating what the Databook says about the 2075 * EndTransfer command. Ideally we would _always_ wait for the 2076 * EndTransfer Command Completion IRQ, but that's causing too 2077 * much trouble synchronizing between us and gadget driver. 2078 * 2079 * We have discussed this with the IP Provider and it was 2080 * suggested to giveback all requests here, but give HW some 2081 * extra time to synchronize with the interconnect. We're using 2082 * an arbitraty 100us delay for that. 2083 * 2084 * Note also that a similar handling was tested by Synopsys 2085 * (thanks a lot Paul) and nothing bad has come out of it. 2086 * In short, what we're doing is: 2087 * 2088 * - Issue EndTransfer WITH CMDIOC bit set 2089 * - Wait 100us 2090 */ 2091 2092 cmd = DWC3_DEPCMD_ENDTRANSFER; 2093 cmd |= force ? 
DWC3_DEPCMD_HIPRI_FORCERM : 0; 2094 cmd |= DWC3_DEPCMD_CMDIOC; 2095 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2096 memset(¶ms, 0, sizeof(params)); 2097 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 2098 WARN_ON_ONCE(ret); 2099 dep->resource_index = 0; 2100 dep->flags &= ~DWC3_EP_BUSY; 2101 udelay(100); 2102 } 2103 2104 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 2105 { 2106 u32 epnum; 2107 2108 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2109 struct dwc3_ep *dep; 2110 2111 dep = dwc->eps[epnum]; 2112 if (!dep) 2113 continue; 2114 2115 if (!(dep->flags & DWC3_EP_ENABLED)) 2116 continue; 2117 2118 dwc3_remove_requests(dwc, dep); 2119 } 2120 } 2121 2122 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2123 { 2124 u32 epnum; 2125 2126 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2127 struct dwc3_ep *dep; 2128 struct dwc3_gadget_ep_cmd_params params; 2129 int ret; 2130 2131 dep = dwc->eps[epnum]; 2132 if (!dep) 2133 continue; 2134 2135 if (!(dep->flags & DWC3_EP_STALL)) 2136 continue; 2137 2138 dep->flags &= ~DWC3_EP_STALL; 2139 2140 memset(¶ms, 0, sizeof(params)); 2141 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2142 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2143 WARN_ON_ONCE(ret); 2144 } 2145 } 2146 2147 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2148 { 2149 int reg; 2150 2151 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2152 reg &= ~DWC3_DCTL_INITU1ENA; 2153 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2154 2155 reg &= ~DWC3_DCTL_INITU2ENA; 2156 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2157 2158 dwc3_disconnect_gadget(dwc); 2159 dwc->start_config_issued = false; 2160 2161 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2162 dwc->setup_packet_pending = false; 2163 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2164 } 2165 2166 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2167 { 2168 u32 reg; 2169 2170 /* 2171 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2172 * would cause a missing 
Disconnect Event if there's a 2173 * pending Setup Packet in the FIFO. 2174 * 2175 * There's no suggested workaround on the official Bug 2176 * report, which states that "unless the driver/application 2177 * is doing any special handling of a disconnect event, 2178 * there is no functional issue". 2179 * 2180 * Unfortunately, it turns out that we _do_ some special 2181 * handling of a disconnect event, namely complete all 2182 * pending transfers, notify gadget driver of the 2183 * disconnection, and so on. 2184 * 2185 * Our suggested workaround is to follow the Disconnect 2186 * Event steps here, instead, based on a setup_packet_pending 2187 * flag. Such flag gets set whenever we have a XferNotReady 2188 * event on EP0 and gets cleared on XferComplete for the 2189 * same endpoint. 2190 * 2191 * Refers to: 2192 * 2193 * STAR#9000466709: RTL: Device : Disconnect event not 2194 * generated if setup packet pending in FIFO 2195 */ 2196 if (dwc->revision < DWC3_REVISION_188A) { 2197 if (dwc->setup_packet_pending) 2198 dwc3_gadget_disconnect_interrupt(dwc); 2199 } 2200 2201 dwc3_reset_gadget(dwc); 2202 2203 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2204 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2205 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2206 dwc->test_mode = false; 2207 2208 dwc3_stop_active_transfers(dwc); 2209 dwc3_clear_stall_all_ep(dwc); 2210 dwc->start_config_issued = false; 2211 2212 /* Reset device address to zero */ 2213 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2214 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2215 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2216 } 2217 2218 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 2219 { 2220 u32 reg; 2221 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 2222 2223 /* 2224 * We change the clock only at SS but I dunno why I would want to do 2225 * this. Maybe it becomes part of the power saving plan. 
2226 */ 2227 2228 if (speed != DWC3_DSTS_SUPERSPEED) 2229 return; 2230 2231 /* 2232 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2233 * each time on Connect Done. 2234 */ 2235 if (!usb30_clock) 2236 return; 2237 2238 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 2239 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); 2240 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 2241 } 2242 2243 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2244 { 2245 struct dwc3_ep *dep; 2246 int ret; 2247 u32 reg; 2248 u8 speed; 2249 2250 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2251 speed = reg & DWC3_DSTS_CONNECTSPD; 2252 dwc->speed = speed; 2253 2254 dwc3_update_ram_clk_sel(dwc, speed); 2255 2256 switch (speed) { 2257 case DWC3_DCFG_SUPERSPEED: 2258 /* 2259 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2260 * would cause a missing USB3 Reset event. 2261 * 2262 * In such situations, we should force a USB3 Reset 2263 * event by calling our dwc3_gadget_reset_interrupt() 2264 * routine. 
2265 * 2266 * Refers to: 2267 * 2268 * STAR#9000483510: RTL: SS : USB3 reset event may 2269 * not be generated always when the link enters poll 2270 */ 2271 if (dwc->revision < DWC3_REVISION_190A) 2272 dwc3_gadget_reset_interrupt(dwc); 2273 2274 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2275 dwc->gadget.ep0->maxpacket = 512; 2276 dwc->gadget.speed = USB_SPEED_SUPER; 2277 break; 2278 case DWC3_DCFG_HIGHSPEED: 2279 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2280 dwc->gadget.ep0->maxpacket = 64; 2281 dwc->gadget.speed = USB_SPEED_HIGH; 2282 break; 2283 case DWC3_DCFG_FULLSPEED2: 2284 case DWC3_DCFG_FULLSPEED1: 2285 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2286 dwc->gadget.ep0->maxpacket = 64; 2287 dwc->gadget.speed = USB_SPEED_FULL; 2288 break; 2289 case DWC3_DCFG_LOWSPEED: 2290 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2291 dwc->gadget.ep0->maxpacket = 8; 2292 dwc->gadget.speed = USB_SPEED_LOW; 2293 break; 2294 } 2295 2296 /* Enable USB2 LPM Capability */ 2297 2298 if ((dwc->revision > DWC3_REVISION_194A) 2299 && (speed != DWC3_DCFG_SUPERSPEED)) { 2300 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2301 reg |= DWC3_DCFG_LPM_CAP; 2302 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2303 2304 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2305 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2306 2307 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2308 2309 /* 2310 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2311 * DCFG.LPMCap is set, core responses with an ACK and the 2312 * BESL value in the LPM token is less than or equal to LPM 2313 * NYET threshold. 
2314 */ 2315 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2316 && dwc->has_lpm_erratum, 2317 "LPM Erratum not available on dwc3 revisisions < 2.40a\n"); 2318 2319 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2320 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2321 2322 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2323 } else { 2324 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2325 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2326 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2327 } 2328 2329 dep = dwc->eps[0]; 2330 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2331 false); 2332 if (ret) { 2333 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2334 return; 2335 } 2336 2337 dep = dwc->eps[1]; 2338 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2339 false); 2340 if (ret) { 2341 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2342 return; 2343 } 2344 2345 /* 2346 * Configure PHY via GUSB3PIPECTLn if required. 2347 * 2348 * Update GTXFIFOSIZn 2349 * 2350 * In both cases reset values should be sufficient. 2351 */ 2352 } 2353 2354 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2355 { 2356 /* 2357 * TODO take core out of low power mode when that's 2358 * implemented. 2359 */ 2360 2361 dwc->gadget_driver->resume(&dwc->gadget); 2362 } 2363 2364 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2365 unsigned int evtinfo) 2366 { 2367 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2368 unsigned int pwropt; 2369 2370 /* 2371 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2372 * Hibernation mode enabled which would show up when device detects 2373 * host-initiated U3 exit. 2374 * 2375 * In that case, device will generate a Link State Change Interrupt 2376 * from U3 to RESUME which is only necessary if Hibernation is 2377 * configured in. 2378 * 2379 * There are no functional changes due to such spurious event and we 2380 * just need to ignore it. 
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	/* hibernation support is advertised through GHWPARAMS1.EN_PWROPT */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/*
				 * Stash the currently-enabled U1/U2 bits so
				 * the second half of the workaround can
				 * restore them later.
				 */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* notify the gadget driver about suspend/resume transitions */
	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	/* BIT(4) of the event info flags a SuperSpeed hibernation event */
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * have a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	/* ignore the event if its speed doesn't match our current speed */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

/* Dispatch a device-specific (DEVT) event to its handler. */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		/* hibernation events are only valid when supported by HW */
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

/* Route one raw event entry to the endpoint or device event path. */
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2542 } 2543 2544 switch (event->type.type) { 2545 case DWC3_EVENT_TYPE_DEV: 2546 dwc3_gadget_interrupt(dwc, &event->devt); 2547 break; 2548 /* REVISIT what to do with Carkit and I2C events ? */ 2549 default: 2550 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2551 } 2552 } 2553 2554 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2555 { 2556 struct dwc3_event_buffer *evt; 2557 irqreturn_t ret = IRQ_NONE; 2558 int left; 2559 u32 reg; 2560 2561 evt = dwc->ev_buffs[buf]; 2562 left = evt->count; 2563 2564 if (!(evt->flags & DWC3_EVENT_PENDING)) 2565 return IRQ_NONE; 2566 2567 while (left > 0) { 2568 union dwc3_event event; 2569 2570 event.raw = *(u32 *) (evt->buf + evt->lpos); 2571 2572 dwc3_process_event_entry(dwc, &event); 2573 2574 /* 2575 * FIXME we wrap around correctly to the next entry as 2576 * almost all entries are 4 bytes in size. There is one 2577 * entry which has 12 bytes which is a regular entry 2578 * followed by 8 bytes data. ATM I don't know how 2579 * things are organized if we get next to the a 2580 * boundary so I worry about that once we try to handle 2581 * that. 
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* tell the hardware we consumed 4 bytes from this buffer */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return ret;
}

/* Threaded IRQ handler: drains every event buffer under the device lock. */
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++)
		ret |= dwc3_process_event_buf(dwc, i);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * Hardirq-side check for one event buffer: if it holds pending bytes,
 * record the count, mask the buffer's interrupt and wake the thread.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;
	u32 reg;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return IRQ_WAKE_THREAD;
}

/* Top-half IRQ handler; defers actual event processing to the thread. */
static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dwc->lock);

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_check_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	spin_unlock(&dwc->lock);

	return ret;
}

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer
 *	to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;

	/*
	 * Allocations below are unwound in reverse order via the err*
	 * labels at the end of this function.
	 */
	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->gadget.ops = &dwc3_gadget_ops;
	dwc->gadget.max_speed = USB_SPEED_SUPER;
	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported = true;
	dwc->gadget.name = "dwc3-gadget";

	/*
	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
	 * on ep out.
	 */
	dwc->gadget.quirk_ep_out_aligned_size = true;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err4;
	}

	return 0;

	/* unwind allocations in reverse order of acquisition */
err4:
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

err3:
	kfree(dwc->setup_buf);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

/* Mirror of dwc3_gadget_init(): unregister the UDC and free everything. */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	if (dwc->pullups_connected) {
		dwc3_gadget_disable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true, true);
	}

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	/* save DCFG; dwc3_gadget_resume() writes it back */
	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err0;

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err1;

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	/* restore DCFG saved by dwc3_gadget_suspend() */
	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);

	if (dwc->pullups_connected) {
		dwc3_gadget_enable_irq(dwc);
		dwc3_gadget_run_stop(dwc, true, false);
	}

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}