1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 dwc3_trace(trace_dwc3_gadget, 143 "link state change request timed out"); 144 145 return -ETIMEDOUT; 146 } 147 148 /** 149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 150 * @dwc: pointer to our context structure 151 * 152 * This function will a best effort FIFO allocation in order 153 * to improve FIFO usage and throughput, while still allowing 154 * us to enable as many endpoints as possible. 155 * 156 * Keep in mind that this operation will be highly dependent 157 * on the configured size for RAM1 - which contains TxFifo -, 158 * the amount of endpoints enabled on coreConsultant tool, and 159 * the width of the Master Bus. 160 * 161 * In the ideal world, we would always be able to satisfy the 162 * following equation: 163 * 164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 166 * 167 * Unfortunately, due to many variables that's not always the case. 168 */ 169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 170 { 171 int last_fifo_depth = 0; 172 int ram1_depth; 173 int fifo_size; 174 int mdwidth; 175 int num; 176 177 if (!dwc->needs_fifo_resize) 178 return 0; 179 180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); 181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 182 183 /* MDWIDTH is represented in bits, we need it in bytes */ 184 mdwidth >>= 3; 185 186 /* 187 * FIXME For now we will only allocate 1 wMaxPacketSize space 188 * for each enabled endpoint, later patches will come to 189 * improve this algorithm so that we better use the internal 190 * FIFO space 191 */ 192 for (num = 0; num < dwc->num_in_eps; num++) { 193 /* bit0 indicates direction; 1 means IN ep */ 194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; 195 int mult = 1; 196 int tmp; 197 198 if (!(dep->flags & DWC3_EP_ENABLED)) 199 continue; 200 201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 202 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 203 mult = 3; 204 205 /* 206 * REVISIT: the following assumes we will always have enough 207 * space available on the FIFO RAM for all possible use cases. 208 * Make sure that's true somehow and change FIFO allocation 209 * accordingly. 210 * 211 * If we have Bulk or Isochronous endpoints, we want 212 * them to be able to be very, very fast. So we're giving 213 * those endpoints a fifo_size which is enough for 3 full 214 * packets 215 */ 216 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 217 tmp += mdwidth; 218 219 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 220 221 fifo_size |= (last_fifo_depth << 16); 222 223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d", 224 dep->name, last_fifo_depth, fifo_size & 0xffff); 225 226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 227 228 last_fifo_depth += (fifo_size & 0xffff); 229 } 230 231 return 0; 232 } 233 234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 235 int status) 236 { 237 struct dwc3 *dwc = dep->dwc; 238 int i; 239 240 if (req->queued) { 241 i = 0; 242 do { 243 dep->busy_slot++; 244 /* 245 * Skip LINK TRB. We can't use req->trb and check for 246 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 247 * just completed (not the LINK TRB). 248 */ 249 if (((dep->busy_slot & DWC3_TRB_MASK) == 250 DWC3_TRB_NUM- 1) && 251 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 252 dep->busy_slot++; 253 } while(++i < req->request.num_mapped_sgs); 254 req->queued = false; 255 } 256 list_del(&req->list); 257 req->trb = NULL; 258 259 if (req->request.status == -EINPROGRESS) 260 req->request.status = status; 261 262 if (dwc->ep0_bounced && dep->number == 0) 263 dwc->ep0_bounced = false; 264 else 265 usb_gadget_unmap_request(&dwc->gadget, &req->request, 266 req->direction); 267 268 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", 269 req, dep->name, req->request.actual, 270 req->request.length, status); 271 trace_dwc3_gadget_giveback(req); 272 273 spin_unlock(&dwc->lock); 274 usb_gadget_giveback_request(&dep->endpoint, &req->request); 275 spin_lock(&dwc->lock); 276 } 277 278 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 279 { 280 u32 timeout = 500; 281 u32 reg; 282 283 trace_dwc3_gadget_generic_cmd(cmd, param); 284 285 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 286 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 287 288 do { 289 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 290 if (!(reg & DWC3_DGCMD_CMDACT)) { 291 dwc3_trace(trace_dwc3_gadget, 292 "Command Complete --> %d", 293 DWC3_DGCMD_STATUS(reg)); 294 return 0; 295 } 296 297 /* 298 * We can't sleep here, because it's also called from 299 * interrupt context. 300 */ 301 timeout--; 302 if (!timeout) { 303 dwc3_trace(trace_dwc3_gadget, 304 "Command Timed Out"); 305 return -ETIMEDOUT; 306 } 307 udelay(1); 308 } while (1); 309 } 310 311 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 312 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 313 { 314 struct dwc3_ep *dep = dwc->eps[ep]; 315 u32 timeout = 500; 316 u32 reg; 317 318 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 319 320 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 321 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 322 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 323 324 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 325 do { 326 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 327 if (!(reg & DWC3_DEPCMD_CMDACT)) { 328 dwc3_trace(trace_dwc3_gadget, 329 "Command Complete --> %d", 330 DWC3_DEPCMD_STATUS(reg)); 331 return 0; 332 } 333 334 /* 335 * We can't sleep here, because it is also called from 336 * interrupt context. 337 */ 338 timeout--; 339 if (!timeout) { 340 dwc3_trace(trace_dwc3_gadget, 341 "Command Timed Out"); 342 return -ETIMEDOUT; 343 } 344 345 udelay(1); 346 } while (1); 347 } 348 349 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 350 struct dwc3_trb *trb) 351 { 352 u32 offset = (char *) trb - (char *) dep->trb_pool; 353 354 return dep->trb_pool_dma + offset; 355 } 356 357 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 358 { 359 struct dwc3 *dwc = dep->dwc; 360 361 if (dep->trb_pool) 362 return 0; 363 364 dep->trb_pool = dma_alloc_coherent(dwc->dev, 365 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 366 &dep->trb_pool_dma, GFP_KERNEL); 367 if (!dep->trb_pool) { 368 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 369 dep->name); 370 return -ENOMEM; 371 } 372 373 return 0; 374 } 375 376 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 377 { 378 struct dwc3 *dwc = dep->dwc; 379 380 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 381 dep->trb_pool, dep->trb_pool_dma); 382 383 dep->trb_pool = NULL; 384 dep->trb_pool_dma = 0; 385 } 386 387 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 388 { 389 struct dwc3_gadget_ep_cmd_params params; 390 u32 cmd; 391 392 memset(¶ms, 0x00, sizeof(params)); 393 394 if (dep->number != 1) { 395 cmd = DWC3_DEPCMD_DEPSTARTCFG; 396 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 397 if (dep->number > 1) { 398 if (dwc->start_config_issued) 399 return 0; 400 dwc->start_config_issued = true; 401 cmd |= DWC3_DEPCMD_PARAM(2); 402 } 403 404 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 405 } 406 407 return 0; 408 } 409 410 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 411 const struct usb_endpoint_descriptor *desc, 412 const struct usb_ss_ep_comp_descriptor *comp_desc, 413 bool ignore, bool restore) 414 { 415 struct dwc3_gadget_ep_cmd_params params; 416 417 memset(¶ms, 0x00, sizeof(params)); 418 419 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 420 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 421 422 /* Burst size is only needed in SuperSpeed mode */ 423 if (dwc->gadget.speed == USB_SPEED_SUPER) { 424 u32 burst = dep->endpoint.maxburst - 1; 425 426 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 427 } 428 429 if (ignore) 430 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 431 432 if (restore) { 433 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 434 params.param2 |= dep->saved_state; 435 } 436 437 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 438 | DWC3_DEPCFG_XFER_NOT_READY_EN; 439 440 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 441 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 442 | DWC3_DEPCFG_STREAM_EVENT_EN; 443 dep->stream_capable = true; 444 } 445 446 if (!usb_endpoint_xfer_control(desc)) 447 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 448 449 /* 450 * We are doing 1:1 mapping for endpoints, meaning 451 * Physical Endpoints 2 maps to Logical Endpoint 2 and 452 * so on. We consider the direction bit as part of the physical 453 * endpoint number. So USB endpoint 0x81 is 0x03. 454 */ 455 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 456 457 /* 458 * We must use the lower 16 TX FIFOs even though 459 * HW might have more 460 */ 461 if (dep->direction) 462 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 463 464 if (desc->bInterval) { 465 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 466 dep->interval = 1 << (desc->bInterval - 1); 467 } 468 469 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 470 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 471 } 472 473 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 474 { 475 struct dwc3_gadget_ep_cmd_params params; 476 477 memset(¶ms, 0x00, sizeof(params)); 478 479 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 480 481 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 482 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 483 } 484 485 /** 486 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 487 * @dep: endpoint to be initialized 488 * @desc: USB Endpoint Descriptor 489 * 490 * Caller should take care of locking 491 */ 492 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 493 const struct usb_endpoint_descriptor *desc, 494 const struct usb_ss_ep_comp_descriptor *comp_desc, 495 bool ignore, bool restore) 496 { 497 struct dwc3 *dwc = dep->dwc; 498 u32 reg; 499 int ret; 500 501 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); 502 503 if (!(dep->flags & DWC3_EP_ENABLED)) { 504 ret = dwc3_gadget_start_config(dwc, dep); 505 if (ret) 506 return ret; 507 } 508 509 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 510 restore); 511 if (ret) 512 return ret; 513 514 if (!(dep->flags & DWC3_EP_ENABLED)) { 515 struct dwc3_trb *trb_st_hw; 516 struct dwc3_trb *trb_link; 517 518 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 519 if (ret) 520 return ret; 521 522 dep->endpoint.desc = desc; 523 dep->comp_desc = comp_desc; 524 dep->type = usb_endpoint_type(desc); 525 dep->flags |= DWC3_EP_ENABLED; 526 527 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 528 reg |= DWC3_DALEPENA_EP(dep->number); 529 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 530 531 if (!usb_endpoint_xfer_isoc(desc)) 532 return 0; 533 534 /* Link TRB for ISOC. The HWO bit is never reset */ 535 trb_st_hw = &dep->trb_pool[0]; 536 537 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 538 memset(trb_link, 0, sizeof(*trb_link)); 539 540 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 541 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 542 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 543 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 544 } 545 546 return 0; 547 } 548 549 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 550 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 551 { 552 struct dwc3_request *req; 553 554 if (!list_empty(&dep->req_queued)) { 555 dwc3_stop_active_transfer(dwc, dep->number, true); 556 557 /* - giveback all requests to gadget driver */ 558 while (!list_empty(&dep->req_queued)) { 559 req = next_request(&dep->req_queued); 560 561 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 562 } 563 } 564 565 while (!list_empty(&dep->request_list)) { 566 req = next_request(&dep->request_list); 567 568 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 569 } 570 } 571 572 /** 573 * __dwc3_gadget_ep_disable - Disables a HW endpoint 574 * @dep: the endpoint to disable 575 * 576 * This function also removes requests which are currently processed ny the 577 * hardware and those which are not yet scheduled. 578 * Caller should take care of locking. 579 */ 580 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 581 { 582 struct dwc3 *dwc = dep->dwc; 583 u32 reg; 584 585 dwc3_remove_requests(dwc, dep); 586 587 /* make sure HW endpoint isn't stalled */ 588 if (dep->flags & DWC3_EP_STALL) 589 __dwc3_gadget_ep_set_halt(dep, 0, false); 590 591 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 592 reg &= ~DWC3_DALEPENA_EP(dep->number); 593 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 594 595 dep->stream_capable = false; 596 dep->endpoint.desc = NULL; 597 dep->comp_desc = NULL; 598 dep->type = 0; 599 dep->flags = 0; 600 601 return 0; 602 } 603 604 /* -------------------------------------------------------------------------- */ 605 606 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 607 const struct usb_endpoint_descriptor *desc) 608 { 609 return -EINVAL; 610 } 611 612 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 613 { 614 return -EINVAL; 615 } 616 617 /* -------------------------------------------------------------------------- */ 618 619 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 620 const struct usb_endpoint_descriptor *desc) 621 { 622 struct dwc3_ep *dep; 623 struct dwc3 *dwc; 624 unsigned long flags; 625 int ret; 626 627 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 628 pr_debug("dwc3: invalid parameters\n"); 629 return -EINVAL; 630 } 631 632 if (!desc->wMaxPacketSize) { 633 pr_debug("dwc3: missing wMaxPacketSize\n"); 634 return -EINVAL; 635 } 636 637 dep = to_dwc3_ep(ep); 638 dwc = dep->dwc; 639 640 if (dep->flags & DWC3_EP_ENABLED) { 641 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", 642 dep->name); 643 return 0; 644 } 645 646 switch (usb_endpoint_type(desc)) { 647 case USB_ENDPOINT_XFER_CONTROL: 648 strlcat(dep->name, "-control", sizeof(dep->name)); 649 break; 650 case USB_ENDPOINT_XFER_ISOC: 651 strlcat(dep->name, "-isoc", sizeof(dep->name)); 652 break; 653 case USB_ENDPOINT_XFER_BULK: 654 strlcat(dep->name, "-bulk", sizeof(dep->name)); 655 break; 656 case USB_ENDPOINT_XFER_INT: 657 strlcat(dep->name, "-int", sizeof(dep->name)); 658 break; 659 default: 660 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 661 } 662 663 spin_lock_irqsave(&dwc->lock, flags); 664 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 665 spin_unlock_irqrestore(&dwc->lock, flags); 666 667 return ret; 668 } 669 670 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 671 { 672 struct dwc3_ep *dep; 673 struct dwc3 *dwc; 674 unsigned long flags; 675 int ret; 676 677 if (!ep) { 678 pr_debug("dwc3: invalid parameters\n"); 679 return -EINVAL; 680 } 681 682 dep = to_dwc3_ep(ep); 683 dwc = dep->dwc; 684 685 if (!(dep->flags & DWC3_EP_ENABLED)) { 686 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 687 dep->name); 688 return 0; 689 } 690 691 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 692 dep->number >> 1, 693 (dep->number & 1) ? "in" : "out"); 694 695 spin_lock_irqsave(&dwc->lock, flags); 696 ret = __dwc3_gadget_ep_disable(dep); 697 spin_unlock_irqrestore(&dwc->lock, flags); 698 699 return ret; 700 } 701 702 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 703 gfp_t gfp_flags) 704 { 705 struct dwc3_request *req; 706 struct dwc3_ep *dep = to_dwc3_ep(ep); 707 708 req = kzalloc(sizeof(*req), gfp_flags); 709 if (!req) 710 return NULL; 711 712 req->epnum = dep->number; 713 req->dep = dep; 714 715 trace_dwc3_alloc_request(req); 716 717 return &req->request; 718 } 719 720 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 721 struct usb_request *request) 722 { 723 struct dwc3_request *req = to_dwc3_request(request); 724 725 trace_dwc3_free_request(req); 726 kfree(req); 727 } 728 729 /** 730 * dwc3_prepare_one_trb - setup one TRB from one request 731 * @dep: endpoint for which this request is prepared 732 * @req: dwc3_request pointer 733 */ 734 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 735 struct dwc3_request *req, dma_addr_t dma, 736 unsigned length, unsigned last, unsigned chain, unsigned node) 737 { 738 struct dwc3_trb *trb; 739 740 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s", 741 dep->name, req, (unsigned long long) dma, 742 length, last ? " last" : "", 743 chain ? " chain" : ""); 744 745 746 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 747 748 if (!req->trb) { 749 dwc3_gadget_move_request_queued(req); 750 req->trb = trb; 751 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 752 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 753 } 754 755 dep->free_slot++; 756 /* Skip the LINK-TRB on ISOC */ 757 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 758 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 759 dep->free_slot++; 760 761 trb->size = DWC3_TRB_SIZE_LENGTH(length); 762 trb->bpl = lower_32_bits(dma); 763 trb->bph = upper_32_bits(dma); 764 765 switch (usb_endpoint_type(dep->endpoint.desc)) { 766 case USB_ENDPOINT_XFER_CONTROL: 767 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 768 break; 769 770 case USB_ENDPOINT_XFER_ISOC: 771 if (!node) 772 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 773 else 774 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 775 break; 776 777 case USB_ENDPOINT_XFER_BULK: 778 case USB_ENDPOINT_XFER_INT: 779 trb->ctrl = DWC3_TRBCTL_NORMAL; 780 break; 781 default: 782 /* 783 * This is only possible with faulty memory because we 784 * checked it already :) 785 */ 786 BUG(); 787 } 788 789 if (!req->request.no_interrupt && !chain) 790 trb->ctrl |= DWC3_TRB_CTRL_IOC; 791 792 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 793 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 794 trb->ctrl |= DWC3_TRB_CTRL_CSP; 795 } else if (last) { 796 trb->ctrl |= DWC3_TRB_CTRL_LST; 797 } 798 799 if (chain) 800 trb->ctrl |= DWC3_TRB_CTRL_CHN; 801 802 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 803 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 804 805 trb->ctrl |= DWC3_TRB_CTRL_HWO; 806 807 trace_dwc3_prepare_trb(dep, trb); 808 } 809 810 /* 811 * dwc3_prepare_trbs - setup TRBs from requests 812 * @dep: endpoint for which requests are being prepared 813 * @starting: true if the endpoint is idle and no requests are queued. 814 * 815 * The function goes through the requests list and sets up TRBs for the 816 * transfers. The function returns once there are no more TRBs available or 817 * it runs out of requests. 818 */ 819 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 820 { 821 struct dwc3_request *req, *n; 822 u32 trbs_left; 823 u32 max; 824 unsigned int last_one = 0; 825 826 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 827 828 /* the first request must not be queued */ 829 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 830 831 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 832 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 833 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 834 if (trbs_left > max) 835 trbs_left = max; 836 } 837 838 /* 839 * If busy & slot are equal than it is either full or empty. If we are 840 * starting to process requests then we are empty. Otherwise we are 841 * full and don't do anything 842 */ 843 if (!trbs_left) { 844 if (!starting) 845 return; 846 trbs_left = DWC3_TRB_NUM; 847 /* 848 * In case we start from scratch, we queue the ISOC requests 849 * starting from slot 1. This is done because we use ring 850 * buffer and have no LST bit to stop us. Instead, we place 851 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 852 * after the first request so we start at slot 1 and have 853 * 7 requests proceed before we hit the first IOC. 854 * Other transfer types don't use the ring buffer and are 855 * processed from the first TRB until the last one. Since we 856 * don't wrap around we have to start at the beginning. 857 */ 858 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 859 dep->busy_slot = 1; 860 dep->free_slot = 1; 861 } else { 862 dep->busy_slot = 0; 863 dep->free_slot = 0; 864 } 865 } 866 867 /* The last TRB is a link TRB, not used for xfer */ 868 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 869 return; 870 871 list_for_each_entry_safe(req, n, &dep->request_list, list) { 872 unsigned length; 873 dma_addr_t dma; 874 last_one = false; 875 876 if (req->request.num_mapped_sgs > 0) { 877 struct usb_request *request = &req->request; 878 struct scatterlist *sg = request->sg; 879 struct scatterlist *s; 880 int i; 881 882 for_each_sg(sg, s, request->num_mapped_sgs, i) { 883 unsigned chain = true; 884 885 length = sg_dma_len(s); 886 dma = sg_dma_address(s); 887 888 if (i == (request->num_mapped_sgs - 1) || 889 sg_is_last(s)) { 890 if (list_empty(&dep->request_list)) 891 last_one = true; 892 chain = false; 893 } 894 895 trbs_left--; 896 if (!trbs_left) 897 last_one = true; 898 899 if (last_one) 900 chain = false; 901 902 dwc3_prepare_one_trb(dep, req, dma, length, 903 last_one, chain, i); 904 905 if (last_one) 906 break; 907 } 908 909 if (last_one) 910 break; 911 } else { 912 dma = req->request.dma; 913 length = req->request.length; 914 trbs_left--; 915 916 if (!trbs_left) 917 last_one = 1; 918 919 /* Is this the last request? */ 920 if (list_is_last(&req->list, &dep->request_list)) 921 last_one = 1; 922 923 dwc3_prepare_one_trb(dep, req, dma, length, 924 last_one, false, 0); 925 926 if (last_one) 927 break; 928 } 929 } 930 } 931 932 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 933 int start_new) 934 { 935 struct dwc3_gadget_ep_cmd_params params; 936 struct dwc3_request *req; 937 struct dwc3 *dwc = dep->dwc; 938 int ret; 939 u32 cmd; 940 941 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 942 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name); 943 return -EBUSY; 944 } 945 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 946 947 /* 948 * If we are getting here after a short-out-packet we don't enqueue any 949 * new requests as we try to set the IOC bit only on the last request. 950 */ 951 if (start_new) { 952 if (list_empty(&dep->req_queued)) 953 dwc3_prepare_trbs(dep, start_new); 954 955 /* req points to the first request which will be sent */ 956 req = next_request(&dep->req_queued); 957 } else { 958 dwc3_prepare_trbs(dep, start_new); 959 960 /* 961 * req points to the first request where HWO changed from 0 to 1 962 */ 963 req = next_request(&dep->req_queued); 964 } 965 if (!req) { 966 dep->flags |= DWC3_EP_PENDING_REQUEST; 967 return 0; 968 } 969 970 memset(¶ms, 0, sizeof(params)); 971 972 if (start_new) { 973 params.param0 = upper_32_bits(req->trb_dma); 974 params.param1 = lower_32_bits(req->trb_dma); 975 cmd = DWC3_DEPCMD_STARTTRANSFER; 976 } else { 977 cmd = DWC3_DEPCMD_UPDATETRANSFER; 978 } 979 980 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 981 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 982 if (ret < 0) { 983 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 984 985 /* 986 * FIXME we need to iterate over the list of requests 987 * here and stop, unmap, free and del each of the linked 988 * requests instead of what we do now. 989 */ 990 usb_gadget_unmap_request(&dwc->gadget, &req->request, 991 req->direction); 992 list_del(&req->list); 993 return ret; 994 } 995 996 dep->flags |= DWC3_EP_BUSY; 997 998 if (start_new) { 999 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 1000 dep->number); 1001 WARN_ON_ONCE(!dep->resource_index); 1002 } 1003 1004 return 0; 1005 } 1006 1007 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1008 struct dwc3_ep *dep, u32 cur_uf) 1009 { 1010 u32 uf; 1011 1012 if (list_empty(&dep->request_list)) { 1013 dwc3_trace(trace_dwc3_gadget, 1014 "ISOC ep %s run out for requests", 1015 dep->name); 1016 dep->flags |= DWC3_EP_PENDING_REQUEST; 1017 return; 1018 } 1019 1020 /* 4 micro frames in the future */ 1021 uf = cur_uf + dep->interval * 4; 1022 1023 __dwc3_gadget_kick_transfer(dep, uf, 1); 1024 } 1025 1026 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1027 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1028 { 1029 u32 cur_uf, mask; 1030 1031 mask = ~(dep->interval - 1); 1032 cur_uf = event->parameters & mask; 1033 1034 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1035 } 1036 1037 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1038 { 1039 struct dwc3 *dwc = dep->dwc; 1040 int ret; 1041 1042 req->request.actual = 0; 1043 req->request.status = -EINPROGRESS; 1044 req->direction = dep->direction; 1045 req->epnum = dep->number; 1046 1047 /* 1048 * We only add to our list of requests now and 1049 * start consuming the list once we get XferNotReady 1050 * IRQ. 1051 * 1052 * That way, we avoid doing anything that we don't need 1053 * to do now and defer it until the point we receive a 1054 * particular token from the Host side. 1055 * 1056 * This will also avoid Host cancelling URBs due to too 1057 * many NAKs. 1058 */ 1059 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1060 dep->direction); 1061 if (ret) 1062 return ret; 1063 1064 list_add_tail(&req->list, &dep->request_list); 1065 1066 /* 1067 * There are a few special cases: 1068 * 1069 * 1. XferNotReady with empty list of requests. We need to kick the 1070 * transfer here in that situation, otherwise we will be NAKing 1071 * forever. If we get XferNotReady before gadget driver has a 1072 * chance to queue a request, we will ACK the IRQ but won't be 1073 * able to receive the data until the next request is queued. 1074 * The following code is handling exactly that. 1075 * 1076 */ 1077 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1078 /* 1079 * If xfernotready is already elapsed and it is a case 1080 * of isoc transfer, then issue END TRANSFER, so that 1081 * you can receive xfernotready again and can have 1082 * notion of current microframe. 1083 */ 1084 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1085 if (list_empty(&dep->req_queued)) { 1086 dwc3_stop_active_transfer(dwc, dep->number, true); 1087 dep->flags = DWC3_EP_ENABLED; 1088 } 1089 return 0; 1090 } 1091 1092 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1093 if (ret && ret != -EBUSY) 1094 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1095 dep->name); 1096 return ret; 1097 } 1098 1099 /* 1100 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1101 * kick the transfer here after queuing a request, otherwise the 1102 * core may not see the modified TRB(s). 1103 */ 1104 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1105 (dep->flags & DWC3_EP_BUSY) && 1106 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1107 WARN_ON_ONCE(!dep->resource_index); 1108 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1109 false); 1110 if (ret && ret != -EBUSY) 1111 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1112 dep->name); 1113 return ret; 1114 } 1115 1116 /* 1117 * 4. Stream Capable Bulk Endpoints. We need to start the transfer 1118 * right away, otherwise host will not know we have streams to be 1119 * handled. 1120 */ 1121 if (dep->stream_capable) { 1122 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1123 if (ret && ret != -EBUSY) 1124 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1125 dep->name); 1126 } 1127 1128 return 0; 1129 } 1130 1131 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1132 gfp_t gfp_flags) 1133 { 1134 struct dwc3_request *req = to_dwc3_request(request); 1135 struct dwc3_ep *dep = to_dwc3_ep(ep); 1136 struct dwc3 *dwc = dep->dwc; 1137 1138 unsigned long flags; 1139 1140 int ret; 1141 1142 spin_lock_irqsave(&dwc->lock, flags); 1143 if (!dep->endpoint.desc) { 1144 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 1145 request, ep->name); 1146 ret = -ESHUTDOWN; 1147 goto out; 1148 } 1149 1150 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1151 request, req->dep->name)) { 1152 ret = -EINVAL; 1153 goto out; 1154 } 1155 1156 trace_dwc3_ep_queue(req); 1157 1158 ret = __dwc3_gadget_ep_queue(dep, req); 1159 1160 out: 1161 spin_unlock_irqrestore(&dwc->lock, flags); 1162 1163 return ret; 1164 } 1165 1166 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1167 struct usb_request *request) 1168 { 1169 struct dwc3_request *req = to_dwc3_request(request); 1170 struct dwc3_request *r = NULL; 1171 1172 struct dwc3_ep *dep = to_dwc3_ep(ep); 1173 struct dwc3 *dwc = dep->dwc; 1174 1175 unsigned long flags; 1176 int ret = 0; 1177 1178 trace_dwc3_ep_dequeue(req); 1179 1180 spin_lock_irqsave(&dwc->lock, flags); 1181 1182 list_for_each_entry(r, &dep->request_list, list) { 1183 if (r == req) 1184 break; 1185 } 1186 1187 if (r != req) { 1188 list_for_each_entry(r, &dep->req_queued, list) { 1189 if (r == req) 1190 break; 1191 } 1192 if (r == req) { 1193 /* wait until it is processed */ 1194 dwc3_stop_active_transfer(dwc, dep->number, true); 1195 goto out1; 1196 } 1197 dev_err(dwc->dev, "request %p was not queued to %s\n", 1198 request, ep->name); 1199 ret = -EINVAL; 1200 goto out0; 1201 } 1202 1203 out1: 1204 /* giveback the request */ 1205 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1206 1207 out0: 1208 spin_unlock_irqrestore(&dwc->lock, flags); 1209 1210 return ret; 1211 } 1212 1213 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1214 { 1215 struct dwc3_gadget_ep_cmd_params params; 1216 struct dwc3 *dwc = dep->dwc; 1217 int ret; 1218 1219 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1220 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1221 return -EINVAL; 1222 } 1223 1224 memset(¶ms, 0x00, sizeof(params)); 1225 1226 if (value) { 1227 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1228 (!list_empty(&dep->req_queued) || 1229 !list_empty(&dep->request_list)))) { 1230 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", 1231 dep->name); 1232 return -EAGAIN; 1233 } 1234 1235 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1236 DWC3_DEPCMD_SETSTALL, ¶ms); 1237 if (ret) 1238 dev_err(dwc->dev, "failed to set STALL on %s\n", 1239 dep->name); 1240 else 1241 dep->flags |= DWC3_EP_STALL; 1242 } else { 1243 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1244 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1245 if (ret) 1246 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1247 dep->name); 1248 else 1249 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1250 } 1251 1252 return ret; 1253 } 1254 1255 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1256 { 1257 struct dwc3_ep *dep = to_dwc3_ep(ep); 1258 struct dwc3 *dwc = dep->dwc; 1259 1260 unsigned long flags; 1261 1262 int ret; 1263 1264 spin_lock_irqsave(&dwc->lock, flags); 1265 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1266 spin_unlock_irqrestore(&dwc->lock, flags); 1267 1268 return ret; 1269 } 1270 1271 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1272 { 1273 struct dwc3_ep *dep = to_dwc3_ep(ep); 1274 struct dwc3 *dwc = dep->dwc; 1275 unsigned long flags; 1276 int ret; 1277 1278 spin_lock_irqsave(&dwc->lock, flags); 1279 dep->flags |= DWC3_EP_WEDGE; 1280 1281 if (dep->number == 0 || dep->number == 1) 1282 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1283 else 1284 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1285 spin_unlock_irqrestore(&dwc->lock, flags); 1286 1287 return ret; 1288 } 1289 1290 /* -------------------------------------------------------------------------- */ 1291 1292 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1293 .bLength = USB_DT_ENDPOINT_SIZE, 1294 .bDescriptorType = USB_DT_ENDPOINT, 1295 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1296 }; 1297 1298 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1299 .enable = dwc3_gadget_ep0_enable, 1300 .disable = dwc3_gadget_ep0_disable, 1301 .alloc_request = dwc3_gadget_ep_alloc_request, 1302 .free_request = dwc3_gadget_ep_free_request, 1303 .queue = dwc3_gadget_ep0_queue, 1304 .dequeue = dwc3_gadget_ep_dequeue, 1305 .set_halt = dwc3_gadget_ep0_set_halt, 1306 .set_wedge = dwc3_gadget_ep_set_wedge, 1307 }; 1308 1309 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1310 .enable = dwc3_gadget_ep_enable, 1311 .disable = dwc3_gadget_ep_disable, 1312 .alloc_request = dwc3_gadget_ep_alloc_request, 1313 .free_request = dwc3_gadget_ep_free_request, 1314 .queue = dwc3_gadget_ep_queue, 1315 .dequeue = dwc3_gadget_ep_dequeue, 1316 .set_halt = dwc3_gadget_ep_set_halt, 1317 .set_wedge = dwc3_gadget_ep_set_wedge, 1318 }; 1319 1320 /* -------------------------------------------------------------------------- */ 1321 1322 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1323 { 1324 struct dwc3 *dwc = gadget_to_dwc(g); 1325 u32 reg; 1326 1327 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1328 return DWC3_DSTS_SOFFN(reg); 1329 } 1330 1331 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1332 { 1333 struct dwc3 *dwc = gadget_to_dwc(g); 1334 1335 unsigned long timeout; 1336 unsigned long flags; 1337 1338 u32 reg; 1339 1340 int ret = 0; 1341 1342 u8 link_state; 1343 u8 speed; 1344 1345 spin_lock_irqsave(&dwc->lock, flags); 1346 1347 /* 1348 * According to the Databook Remote wakeup request should 1349 * be issued only when the device is in early suspend state. 1350 * 1351 * We can check that via USB Link State bits in DSTS register. 1352 */ 1353 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1354 1355 speed = reg & DWC3_DSTS_CONNECTSPD; 1356 if (speed == DWC3_DSTS_SUPERSPEED) { 1357 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1358 ret = -EINVAL; 1359 goto out; 1360 } 1361 1362 link_state = DWC3_DSTS_USBLNKST(reg); 1363 1364 switch (link_state) { 1365 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1366 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1367 break; 1368 default: 1369 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1370 link_state); 1371 ret = -EINVAL; 1372 goto out; 1373 } 1374 1375 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1376 if (ret < 0) { 1377 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1378 goto out; 1379 } 1380 1381 /* Recent versions do this automatically */ 1382 if (dwc->revision < DWC3_REVISION_194A) { 1383 /* write zeroes to Link Change Request */ 1384 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1385 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1386 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1387 } 1388 1389 /* poll until Link State changes to ON */ 1390 timeout = jiffies + msecs_to_jiffies(100); 1391 1392 while (!time_after(jiffies, timeout)) { 1393 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1394 1395 /* in HS, means ON */ 1396 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1397 break; 1398 } 1399 1400 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1401 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1402 ret = -EINVAL; 1403 } 1404 1405 out: 1406 spin_unlock_irqrestore(&dwc->lock, flags); 1407 1408 return ret; 1409 } 1410 1411 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1412 int is_selfpowered) 1413 { 1414 struct dwc3 *dwc = gadget_to_dwc(g); 1415 unsigned long flags; 1416 1417 spin_lock_irqsave(&dwc->lock, flags); 1418 g->is_selfpowered = !!is_selfpowered; 1419 spin_unlock_irqrestore(&dwc->lock, flags); 1420 1421 return 0; 1422 } 1423 1424 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1425 { 1426 u32 reg; 1427 u32 timeout = 500; 1428 1429 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1430 if (is_on) { 1431 if (dwc->revision <= DWC3_REVISION_187A) { 1432 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1433 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1434 } 1435 1436 if (dwc->revision >= DWC3_REVISION_194A) 1437 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1438 reg |= DWC3_DCTL_RUN_STOP; 1439 1440 if (dwc->has_hibernation) 1441 reg |= DWC3_DCTL_KEEP_CONNECT; 1442 1443 dwc->pullups_connected = true; 1444 } else { 1445 reg &= ~DWC3_DCTL_RUN_STOP; 1446 1447 if (dwc->has_hibernation && !suspend) 1448 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1449 1450 dwc->pullups_connected = false; 1451 } 1452 1453 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1454 1455 do { 1456 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1457 if (is_on) { 1458 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1459 break; 1460 } else { 1461 if (reg & DWC3_DSTS_DEVCTRLHLT) 1462 break; 1463 } 1464 timeout--; 1465 if (!timeout) 1466 return -ETIMEDOUT; 1467 udelay(1); 1468 } while (1); 1469 1470 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1471 dwc->gadget_driver 1472 ? dwc->gadget_driver->function : "no-function", 1473 is_on ? "connect" : "disconnect"); 1474 1475 return 0; 1476 } 1477 1478 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1479 { 1480 struct dwc3 *dwc = gadget_to_dwc(g); 1481 unsigned long flags; 1482 int ret; 1483 1484 is_on = !!is_on; 1485 1486 spin_lock_irqsave(&dwc->lock, flags); 1487 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1488 spin_unlock_irqrestore(&dwc->lock, flags); 1489 1490 return ret; 1491 } 1492 1493 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1494 { 1495 u32 reg; 1496 1497 /* Enable all but Start and End of Frame IRQs */ 1498 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1499 DWC3_DEVTEN_EVNTOVERFLOWEN | 1500 DWC3_DEVTEN_CMDCMPLTEN | 1501 DWC3_DEVTEN_ERRTICERREN | 1502 DWC3_DEVTEN_WKUPEVTEN | 1503 DWC3_DEVTEN_ULSTCNGEN | 1504 DWC3_DEVTEN_CONNECTDONEEN | 1505 DWC3_DEVTEN_USBRSTEN | 1506 DWC3_DEVTEN_DISCONNEVTEN); 1507 1508 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1509 } 1510 1511 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1512 { 1513 /* mask all interrupts */ 1514 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1515 } 1516 1517 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1518 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1519 1520 static int dwc3_gadget_start(struct usb_gadget *g, 1521 struct usb_gadget_driver *driver) 1522 { 1523 struct dwc3 *dwc = gadget_to_dwc(g); 1524 struct dwc3_ep *dep; 1525 unsigned long flags; 1526 int ret = 0; 1527 int irq; 1528 u32 reg; 1529 1530 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1531 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1532 IRQF_SHARED, "dwc3", dwc); 1533 if (ret) { 1534 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1535 irq, ret); 1536 goto err0; 1537 } 1538 1539 spin_lock_irqsave(&dwc->lock, flags); 1540 1541 if (dwc->gadget_driver) { 1542 dev_err(dwc->dev, "%s is already bound to %s\n", 1543 dwc->gadget.name, 1544 dwc->gadget_driver->driver.name); 1545 ret = -EBUSY; 1546 goto err1; 1547 } 1548 1549 dwc->gadget_driver = driver; 1550 1551 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1552 reg &= ~(DWC3_DCFG_SPEED_MASK); 1553 1554 /** 1555 * WORKAROUND: DWC3 revision < 2.20a have an issue 1556 * which would cause metastability state on Run/Stop 1557 * bit if we try to force the IP to USB2-only mode. 1558 * 1559 * Because of that, we cannot configure the IP to any 1560 * speed other than the SuperSpeed 1561 * 1562 * Refers to: 1563 * 1564 * STAR#9000525659: Clock Domain Crossing on DCTL in 1565 * USB 2.0 Mode 1566 */ 1567 if (dwc->revision < DWC3_REVISION_220A) { 1568 reg |= DWC3_DCFG_SUPERSPEED; 1569 } else { 1570 switch (dwc->maximum_speed) { 1571 case USB_SPEED_LOW: 1572 reg |= DWC3_DSTS_LOWSPEED; 1573 break; 1574 case USB_SPEED_FULL: 1575 reg |= DWC3_DSTS_FULLSPEED1; 1576 break; 1577 case USB_SPEED_HIGH: 1578 reg |= DWC3_DSTS_HIGHSPEED; 1579 break; 1580 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1581 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1582 default: 1583 reg |= DWC3_DSTS_SUPERSPEED; 1584 } 1585 } 1586 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1587 1588 dwc->start_config_issued = false; 1589 1590 /* Start with SuperSpeed Default */ 1591 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1592 1593 dep = dwc->eps[0]; 1594 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1595 false); 1596 if (ret) { 1597 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1598 goto err2; 1599 } 1600 1601 dep = dwc->eps[1]; 1602 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1603 false); 1604 if (ret) { 1605 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1606 goto err3; 1607 } 1608 1609 /* begin to receive SETUP packets */ 1610 dwc->ep0state = EP0_SETUP_PHASE; 1611 dwc3_ep0_out_start(dwc); 1612 1613 dwc3_gadget_enable_irq(dwc); 1614 1615 spin_unlock_irqrestore(&dwc->lock, flags); 1616 1617 return 0; 1618 1619 err3: 1620 __dwc3_gadget_ep_disable(dwc->eps[0]); 1621 1622 err2: 1623 dwc->gadget_driver = NULL; 1624 1625 err1: 1626 spin_unlock_irqrestore(&dwc->lock, flags); 1627 1628 free_irq(irq, dwc); 1629 1630 err0: 1631 return ret; 1632 } 1633 1634 static int dwc3_gadget_stop(struct usb_gadget *g) 1635 { 1636 struct dwc3 *dwc = gadget_to_dwc(g); 1637 unsigned long flags; 1638 int irq; 1639 1640 spin_lock_irqsave(&dwc->lock, flags); 1641 1642 dwc3_gadget_disable_irq(dwc); 1643 __dwc3_gadget_ep_disable(dwc->eps[0]); 1644 __dwc3_gadget_ep_disable(dwc->eps[1]); 1645 1646 dwc->gadget_driver = NULL; 1647 1648 spin_unlock_irqrestore(&dwc->lock, flags); 1649 1650 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1651 free_irq(irq, dwc); 1652 1653 return 0; 1654 } 1655 1656 static const struct usb_gadget_ops dwc3_gadget_ops = { 1657 .get_frame = dwc3_gadget_get_frame, 1658 .wakeup = dwc3_gadget_wakeup, 1659 .set_selfpowered = dwc3_gadget_set_selfpowered, 1660 .pullup = dwc3_gadget_pullup, 1661 .udc_start = dwc3_gadget_start, 1662 .udc_stop = dwc3_gadget_stop, 1663 }; 1664 1665 /* -------------------------------------------------------------------------- */ 1666 1667 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1668 u8 num, u32 direction) 1669 { 1670 struct dwc3_ep *dep; 1671 u8 i; 1672 1673 for (i = 0; i < num; i++) { 1674 u8 epnum = (i << 1) | (!!direction); 1675 1676 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1677 if (!dep) 1678 return -ENOMEM; 1679 1680 dep->dwc = dwc; 1681 dep->number = epnum; 1682 dep->direction = !!direction; 1683 dwc->eps[epnum] = dep; 1684 1685 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1686 (epnum & 1) ? "in" : "out"); 1687 1688 dep->endpoint.name = dep->name; 1689 1690 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1691 1692 if (epnum == 0 || epnum == 1) { 1693 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1694 dep->endpoint.maxburst = 1; 1695 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1696 if (!epnum) 1697 dwc->gadget.ep0 = &dep->endpoint; 1698 } else { 1699 int ret; 1700 1701 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1702 dep->endpoint.max_streams = 15; 1703 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1704 list_add_tail(&dep->endpoint.ep_list, 1705 &dwc->gadget.ep_list); 1706 1707 ret = dwc3_alloc_trb_pool(dep); 1708 if (ret) 1709 return ret; 1710 } 1711 1712 INIT_LIST_HEAD(&dep->request_list); 1713 INIT_LIST_HEAD(&dep->req_queued); 1714 } 1715 1716 return 0; 1717 } 1718 1719 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1720 { 1721 int ret; 1722 1723 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1724 1725 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1726 if (ret < 0) { 1727 dwc3_trace(trace_dwc3_gadget, 1728 "failed to allocate OUT endpoints"); 1729 return ret; 1730 } 1731 1732 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1733 if (ret < 0) { 1734 dwc3_trace(trace_dwc3_gadget, 1735 "failed to allocate IN endpoints"); 1736 return ret; 1737 } 1738 1739 return 0; 1740 } 1741 1742 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1743 { 1744 struct dwc3_ep *dep; 1745 u8 epnum; 1746 1747 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1748 dep = dwc->eps[epnum]; 1749 if (!dep) 1750 continue; 1751 /* 1752 * Physical endpoints 0 and 1 are special; they form the 1753 * bi-directional USB endpoint 0. 1754 * 1755 * For those two physical endpoints, we don't allocate a TRB 1756 * pool nor do we add them the endpoints list. Due to that, we 1757 * shouldn't do these two operations otherwise we would end up 1758 * with all sorts of bugs when removing dwc3.ko. 1759 */ 1760 if (epnum != 0 && epnum != 1) { 1761 dwc3_free_trb_pool(dep); 1762 list_del(&dep->endpoint.ep_list); 1763 } 1764 1765 kfree(dep); 1766 } 1767 } 1768 1769 /* -------------------------------------------------------------------------- */ 1770 1771 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1772 struct dwc3_request *req, struct dwc3_trb *trb, 1773 const struct dwc3_event_depevt *event, int status) 1774 { 1775 unsigned int count; 1776 unsigned int s_pkt = 0; 1777 unsigned int trb_status; 1778 1779 trace_dwc3_complete_trb(dep, trb); 1780 1781 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1782 /* 1783 * We continue despite the error. There is not much we 1784 * can do. If we don't clean it up we loop forever. If 1785 * we skip the TRB then it gets overwritten after a 1786 * while since we use them in a ring buffer. A BUG() 1787 * would help. Lets hope that if this occurs, someone 1788 * fixes the root cause instead of looking away :) 1789 */ 1790 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1791 dep->name, trb); 1792 count = trb->size & DWC3_TRB_SIZE_MASK; 1793 1794 if (dep->direction) { 1795 if (count) { 1796 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1797 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1798 dev_dbg(dwc->dev, "incomplete IN transfer %s\n", 1799 dep->name); 1800 /* 1801 * If missed isoc occurred and there is 1802 * no request queued then issue END 1803 * TRANSFER, so that core generates 1804 * next xfernotready and we will issue 1805 * a fresh START TRANSFER. 1806 * If there are still queued request 1807 * then wait, do not issue either END 1808 * or UPDATE TRANSFER, just attach next 1809 * request in request_list during 1810 * giveback.If any future queued request 1811 * is successfully transferred then we 1812 * will issue UPDATE TRANSFER for all 1813 * request in the request_list. 1814 */ 1815 dep->flags |= DWC3_EP_MISSED_ISOC; 1816 } else { 1817 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1818 dep->name); 1819 status = -ECONNRESET; 1820 } 1821 } else { 1822 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1823 } 1824 } else { 1825 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1826 s_pkt = 1; 1827 } 1828 1829 /* 1830 * We assume here we will always receive the entire data block 1831 * which we should receive. Meaning, if we program RX to 1832 * receive 4K but we receive only 2K, we assume that's all we 1833 * should receive and we simply bounce the request back to the 1834 * gadget driver for further processing. 1835 */ 1836 req->request.actual += req->request.length - count; 1837 if (s_pkt) 1838 return 1; 1839 if ((event->status & DEPEVT_STATUS_LST) && 1840 (trb->ctrl & (DWC3_TRB_CTRL_LST | 1841 DWC3_TRB_CTRL_HWO))) 1842 return 1; 1843 if ((event->status & DEPEVT_STATUS_IOC) && 1844 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1845 return 1; 1846 return 0; 1847 } 1848 1849 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1850 const struct dwc3_event_depevt *event, int status) 1851 { 1852 struct dwc3_request *req; 1853 struct dwc3_trb *trb; 1854 unsigned int slot; 1855 unsigned int i; 1856 int ret; 1857 1858 do { 1859 req = next_request(&dep->req_queued); 1860 if (!req) { 1861 WARN_ON_ONCE(1); 1862 return 1; 1863 } 1864 i = 0; 1865 do { 1866 slot = req->start_slot + i; 1867 if ((slot == DWC3_TRB_NUM - 1) && 1868 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1869 slot++; 1870 slot %= DWC3_TRB_NUM; 1871 trb = &dep->trb_pool[slot]; 1872 1873 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1874 event, status); 1875 if (ret) 1876 break; 1877 }while (++i < req->request.num_mapped_sgs); 1878 1879 dwc3_gadget_giveback(dep, req, status); 1880 1881 if (ret) 1882 break; 1883 } while (1); 1884 1885 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1886 list_empty(&dep->req_queued)) { 1887 if (list_empty(&dep->request_list)) { 1888 /* 1889 * If there is no entry in request list then do 1890 * not issue END TRANSFER now. Just set PENDING 1891 * flag, so that END TRANSFER is issued when an 1892 * entry is added into request list. 1893 */ 1894 dep->flags = DWC3_EP_PENDING_REQUEST; 1895 } else { 1896 dwc3_stop_active_transfer(dwc, dep->number, true); 1897 dep->flags = DWC3_EP_ENABLED; 1898 } 1899 return 1; 1900 } 1901 1902 return 1; 1903 } 1904 1905 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1906 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1907 { 1908 unsigned status = 0; 1909 int clean_busy; 1910 1911 if (event->status & DEPEVT_STATUS_BUSERR) 1912 status = -ECONNRESET; 1913 1914 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1915 if (clean_busy) 1916 dep->flags &= ~DWC3_EP_BUSY; 1917 1918 /* 1919 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1920 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 1921 */ 1922 if (dwc->revision < DWC3_REVISION_183A) { 1923 u32 reg; 1924 int i; 1925 1926 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1927 dep = dwc->eps[i]; 1928 1929 if (!(dep->flags & DWC3_EP_ENABLED)) 1930 continue; 1931 1932 if (!list_empty(&dep->req_queued)) 1933 return; 1934 } 1935 1936 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1937 reg |= dwc->u1u2; 1938 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1939 1940 dwc->u1u2 = 0; 1941 } 1942 } 1943 1944 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1945 const struct dwc3_event_depevt *event) 1946 { 1947 struct dwc3_ep *dep; 1948 u8 epnum = event->endpoint_number; 1949 1950 dep = dwc->eps[epnum]; 1951 1952 if (!(dep->flags & DWC3_EP_ENABLED)) 1953 return; 1954 1955 if (epnum == 0 || epnum == 1) { 1956 dwc3_ep0_interrupt(dwc, event); 1957 return; 1958 } 1959 1960 switch (event->endpoint_event) { 1961 case DWC3_DEPEVT_XFERCOMPLETE: 1962 dep->resource_index = 0; 1963 1964 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1965 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1966 dep->name); 1967 return; 1968 } 1969 1970 dwc3_endpoint_transfer_complete(dwc, dep, event); 1971 break; 1972 case DWC3_DEPEVT_XFERINPROGRESS: 1973 dwc3_endpoint_transfer_complete(dwc, dep, event); 1974 break; 1975 case DWC3_DEPEVT_XFERNOTREADY: 1976 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1977 dwc3_gadget_start_isoc(dwc, dep, event); 1978 } else { 1979 int ret; 1980 1981 dwc3_trace(trace_dwc3_gadget, "%s: reason %s", 1982 dep->name, event->status & 1983 DEPEVT_STATUS_TRANSFER_ACTIVE 1984 ? "Transfer Active" 1985 : "Transfer Not Active"); 1986 1987 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1988 if (!ret || ret == -EBUSY) 1989 return; 1990 1991 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1992 dep->name); 1993 } 1994 1995 break; 1996 case DWC3_DEPEVT_STREAMEVT: 1997 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 1998 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1999 dep->name); 2000 return; 2001 } 2002 2003 switch (event->status) { 2004 case DEPEVT_STREAMEVT_FOUND: 2005 dwc3_trace(trace_dwc3_gadget, 2006 "Stream %d found and started", 2007 event->parameters); 2008 2009 break; 2010 case DEPEVT_STREAMEVT_NOTFOUND: 2011 /* FALLTHROUGH */ 2012 default: 2013 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 2014 } 2015 break; 2016 case DWC3_DEPEVT_RXTXFIFOEVT: 2017 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 2018 break; 2019 case DWC3_DEPEVT_EPCMDCMPLT: 2020 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); 2021 break; 2022 } 2023 } 2024 2025 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2026 { 2027 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2028 spin_unlock(&dwc->lock); 2029 dwc->gadget_driver->disconnect(&dwc->gadget); 2030 spin_lock(&dwc->lock); 2031 } 2032 } 2033 2034 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2035 { 2036 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2037 spin_unlock(&dwc->lock); 2038 dwc->gadget_driver->suspend(&dwc->gadget); 2039 spin_lock(&dwc->lock); 2040 } 2041 } 2042 2043 static void dwc3_resume_gadget(struct dwc3 *dwc) 2044 { 2045 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2046 spin_unlock(&dwc->lock); 2047 dwc->gadget_driver->resume(&dwc->gadget); 2048 spin_lock(&dwc->lock); 2049 } 2050 } 2051 2052 static void dwc3_reset_gadget(struct dwc3 *dwc) 2053 { 2054 if (!dwc->gadget_driver) 2055 return; 2056 2057 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2058 spin_unlock(&dwc->lock); 2059 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2060 spin_lock(&dwc->lock); 2061 } 2062 } 2063 2064 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2065 { 2066 struct dwc3_ep *dep; 2067 struct dwc3_gadget_ep_cmd_params params; 2068 u32 cmd; 2069 int ret; 2070 2071 dep = dwc->eps[epnum]; 2072 2073 if (!dep->resource_index) 2074 return; 2075 2076 /* 2077 * NOTICE: We are violating what the Databook says about the 2078 * EndTransfer command. Ideally we would _always_ wait for the 2079 * EndTransfer Command Completion IRQ, but that's causing too 2080 * much trouble synchronizing between us and gadget driver. 2081 * 2082 * We have discussed this with the IP Provider and it was 2083 * suggested to giveback all requests here, but give HW some 2084 * extra time to synchronize with the interconnect. We're using 2085 * an arbitrary 100us delay for that. 2086 * 2087 * Note also that a similar handling was tested by Synopsys 2088 * (thanks a lot Paul) and nothing bad has come out of it. 2089 * In short, what we're doing is: 2090 * 2091 * - Issue EndTransfer WITH CMDIOC bit set 2092 * - Wait 100us 2093 */ 2094 2095 cmd = DWC3_DEPCMD_ENDTRANSFER; 2096 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 2097 cmd |= DWC3_DEPCMD_CMDIOC; 2098 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2099 memset(¶ms, 0, sizeof(params)); 2100 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 2101 WARN_ON_ONCE(ret); 2102 dep->resource_index = 0; 2103 dep->flags &= ~DWC3_EP_BUSY; 2104 udelay(100); 2105 } 2106 2107 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 2108 { 2109 u32 epnum; 2110 2111 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2112 struct dwc3_ep *dep; 2113 2114 dep = dwc->eps[epnum]; 2115 if (!dep) 2116 continue; 2117 2118 if (!(dep->flags & DWC3_EP_ENABLED)) 2119 continue; 2120 2121 dwc3_remove_requests(dwc, dep); 2122 } 2123 } 2124 2125 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2126 { 2127 u32 epnum; 2128 2129 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2130 struct dwc3_ep *dep; 2131 struct dwc3_gadget_ep_cmd_params params; 2132 int ret; 2133 2134 dep = dwc->eps[epnum]; 2135 if (!dep) 2136 continue; 2137 2138 if (!(dep->flags & DWC3_EP_STALL)) 2139 continue; 2140 2141 dep->flags &= ~DWC3_EP_STALL; 2142 2143 memset(¶ms, 0, sizeof(params)); 2144 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2145 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2146 WARN_ON_ONCE(ret); 2147 } 2148 } 2149 2150 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2151 { 2152 int reg; 2153 2154 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2155 reg &= ~DWC3_DCTL_INITU1ENA; 2156 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2157 2158 reg &= ~DWC3_DCTL_INITU2ENA; 2159 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2160 2161 dwc3_disconnect_gadget(dwc); 2162 dwc->start_config_issued = false; 2163 2164 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2165 dwc->setup_packet_pending = false; 2166 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2167 } 2168 2169 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2170 { 2171 u32 reg; 2172 2173 /* 2174 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2175 * would cause a missing Disconnect Event if there's a 2176 * pending Setup Packet in the FIFO. 2177 * 2178 * There's no suggested workaround on the official Bug 2179 * report, which states that "unless the driver/application 2180 * is doing any special handling of a disconnect event, 2181 * there is no functional issue". 2182 * 2183 * Unfortunately, it turns out that we _do_ some special 2184 * handling of a disconnect event, namely complete all 2185 * pending transfers, notify gadget driver of the 2186 * disconnection, and so on. 2187 * 2188 * Our suggested workaround is to follow the Disconnect 2189 * Event steps here, instead, based on a setup_packet_pending 2190 * flag. Such flag gets set whenever we have a XferNotReady 2191 * event on EP0 and gets cleared on XferComplete for the 2192 * same endpoint. 2193 * 2194 * Refers to: 2195 * 2196 * STAR#9000466709: RTL: Device : Disconnect event not 2197 * generated if setup packet pending in FIFO 2198 */ 2199 if (dwc->revision < DWC3_REVISION_188A) { 2200 if (dwc->setup_packet_pending) 2201 dwc3_gadget_disconnect_interrupt(dwc); 2202 } 2203 2204 dwc3_reset_gadget(dwc); 2205 2206 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2207 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2208 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2209 dwc->test_mode = false; 2210 2211 dwc3_stop_active_transfers(dwc); 2212 dwc3_clear_stall_all_ep(dwc); 2213 dwc->start_config_issued = false; 2214 2215 /* Reset device address to zero */ 2216 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2217 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2218 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2219 } 2220 2221 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 2222 { 2223 u32 reg; 2224 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 2225 2226 /* 2227 * We change the clock only at SS but I dunno why I would want to do 2228 * this. Maybe it becomes part of the power saving plan. 2229 */ 2230 2231 if (speed != DWC3_DSTS_SUPERSPEED) 2232 return; 2233 2234 /* 2235 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2236 * each time on Connect Done. 2237 */ 2238 if (!usb30_clock) 2239 return; 2240 2241 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 2242 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); 2243 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 2244 } 2245 2246 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2247 { 2248 struct dwc3_ep *dep; 2249 int ret; 2250 u32 reg; 2251 u8 speed; 2252 2253 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2254 speed = reg & DWC3_DSTS_CONNECTSPD; 2255 dwc->speed = speed; 2256 2257 dwc3_update_ram_clk_sel(dwc, speed); 2258 2259 switch (speed) { 2260 case DWC3_DCFG_SUPERSPEED: 2261 /* 2262 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2263 * would cause a missing USB3 Reset event. 2264 * 2265 * In such situations, we should force a USB3 Reset 2266 * event by calling our dwc3_gadget_reset_interrupt() 2267 * routine. 2268 * 2269 * Refers to: 2270 * 2271 * STAR#9000483510: RTL: SS : USB3 reset event may 2272 * not be generated always when the link enters poll 2273 */ 2274 if (dwc->revision < DWC3_REVISION_190A) 2275 dwc3_gadget_reset_interrupt(dwc); 2276 2277 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2278 dwc->gadget.ep0->maxpacket = 512; 2279 dwc->gadget.speed = USB_SPEED_SUPER; 2280 break; 2281 case DWC3_DCFG_HIGHSPEED: 2282 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2283 dwc->gadget.ep0->maxpacket = 64; 2284 dwc->gadget.speed = USB_SPEED_HIGH; 2285 break; 2286 case DWC3_DCFG_FULLSPEED2: 2287 case DWC3_DCFG_FULLSPEED1: 2288 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2289 dwc->gadget.ep0->maxpacket = 64; 2290 dwc->gadget.speed = USB_SPEED_FULL; 2291 break; 2292 case DWC3_DCFG_LOWSPEED: 2293 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2294 dwc->gadget.ep0->maxpacket = 8; 2295 dwc->gadget.speed = USB_SPEED_LOW; 2296 break; 2297 } 2298 2299 /* Enable USB2 LPM Capability */ 2300 2301 if ((dwc->revision > DWC3_REVISION_194A) 2302 && (speed != DWC3_DCFG_SUPERSPEED)) { 2303 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2304 reg |= DWC3_DCFG_LPM_CAP; 2305 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2306 2307 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2308 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2309 2310 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2311 2312 /* 2313 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2314 * DCFG.LPMCap is set, core responses with an ACK and the 2315 * BESL value in the LPM token is less than or equal to LPM 2316 * NYET threshold. 2317 */ 2318 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2319 && dwc->has_lpm_erratum, 2320 "LPM Erratum not available on dwc3 revisisions < 2.40a\n"); 2321 2322 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2323 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2324 2325 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2326 } else { 2327 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2328 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2329 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2330 } 2331 2332 dep = dwc->eps[0]; 2333 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2334 false); 2335 if (ret) { 2336 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2337 return; 2338 } 2339 2340 dep = dwc->eps[1]; 2341 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2342 false); 2343 if (ret) { 2344 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2345 return; 2346 } 2347 2348 /* 2349 * Configure PHY via GUSB3PIPECTLn if required. 2350 * 2351 * Update GTXFIFOSIZn 2352 * 2353 * In both cases reset values should be sufficient. 2354 */ 2355 } 2356 2357 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2358 { 2359 /* 2360 * TODO take core out of low power mode when that's 2361 * implemented. 2362 */ 2363 2364 dwc->gadget_driver->resume(&dwc->gadget); 2365 } 2366 2367 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2368 unsigned int evtinfo) 2369 { 2370 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2371 unsigned int pwropt; 2372 2373 /* 2374 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2375 * Hibernation mode enabled which would show up when device detects 2376 * host-initiated U3 exit. 2377 * 2378 * In that case, device will generate a Link State Change Interrupt 2379 * from U3 to RESUME which is only necessary if Hibernation is 2380 * configured in. 2381 * 2382 * There are no functional changes due to such spurious event and we 2383 * just need to ignore it. 2384 * 2385 * Refers to: 2386 * 2387 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation 2388 * operational mode 2389 */ 2390 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 2391 if ((dwc->revision < DWC3_REVISION_250A) && 2392 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { 2393 if ((dwc->link_state == DWC3_LINK_STATE_U3) && 2394 (next == DWC3_LINK_STATE_RESUME)) { 2395 dwc3_trace(trace_dwc3_gadget, 2396 "ignoring transition U3 -> Resume"); 2397 return; 2398 } 2399 } 2400 2401 /* 2402 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 2403 * on the link partner, the USB session might do multiple entry/exit 2404 * of low power states before a transfer takes place. 2405 * 2406 * Due to this problem, we might experience lower throughput. The 2407 * suggested workaround is to disable DCTL[12:9] bits if we're 2408 * transitioning from U1/U2 to U0 and enable those bits again 2409 * after a transfer completes and there are no pending transfers 2410 * on any of the enabled endpoints. 2411 * 2412 * This is the first half of that workaround. 2413 * 2414 * Refers to: 2415 * 2416 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 2417 * core send LGO_Ux entering U0 2418 */ 2419 if (dwc->revision < DWC3_REVISION_183A) { 2420 if (next == DWC3_LINK_STATE_U0) { 2421 u32 u1u2; 2422 u32 reg; 2423 2424 switch (dwc->link_state) { 2425 case DWC3_LINK_STATE_U1: 2426 case DWC3_LINK_STATE_U2: 2427 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2428 u1u2 = reg & (DWC3_DCTL_INITU2ENA 2429 | DWC3_DCTL_ACCEPTU2ENA 2430 | DWC3_DCTL_INITU1ENA 2431 | DWC3_DCTL_ACCEPTU1ENA); 2432 2433 if (!dwc->u1u2) 2434 dwc->u1u2 = reg & u1u2; 2435 2436 reg &= ~u1u2; 2437 2438 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2439 break; 2440 default: 2441 /* do nothing */ 2442 break; 2443 } 2444 } 2445 } 2446 2447 switch (next) { 2448 case DWC3_LINK_STATE_U1: 2449 if (dwc->speed == USB_SPEED_SUPER) 2450 dwc3_suspend_gadget(dwc); 2451 break; 2452 case DWC3_LINK_STATE_U2: 2453 case DWC3_LINK_STATE_U3: 2454 dwc3_suspend_gadget(dwc); 2455 break; 2456 case DWC3_LINK_STATE_RESUME: 2457 dwc3_resume_gadget(dwc); 2458 break; 2459 default: 2460 /* do nothing */ 2461 break; 2462 } 2463 2464 dwc->link_state = next; 2465 } 2466 2467 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 2468 unsigned int evtinfo) 2469 { 2470 unsigned int is_ss = evtinfo & BIT(4); 2471 2472 /** 2473 * WORKAROUND: DWC3 revison 2.20a with hibernation support 2474 * have a known issue which can cause USB CV TD.9.23 to fail 2475 * randomly. 2476 * 2477 * Because of this issue, core could generate bogus hibernation 2478 * events which SW needs to ignore. 2479 * 2480 * Refers to: 2481 * 2482 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2483 * Device Fallback from SuperSpeed 2484 */ 2485 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2486 return; 2487 2488 /* enter hibernation here */ 2489 } 2490 2491 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2492 const struct dwc3_event_devt *event) 2493 { 2494 switch (event->type) { 2495 case DWC3_DEVICE_EVENT_DISCONNECT: 2496 dwc3_gadget_disconnect_interrupt(dwc); 2497 break; 2498 case DWC3_DEVICE_EVENT_RESET: 2499 dwc3_gadget_reset_interrupt(dwc); 2500 break; 2501 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2502 dwc3_gadget_conndone_interrupt(dwc); 2503 break; 2504 case DWC3_DEVICE_EVENT_WAKEUP: 2505 dwc3_gadget_wakeup_interrupt(dwc); 2506 break; 2507 case DWC3_DEVICE_EVENT_HIBER_REQ: 2508 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 2509 "unexpected hibernation event\n")) 2510 break; 2511 2512 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 2513 break; 2514 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2515 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2516 break; 2517 case DWC3_DEVICE_EVENT_EOPF: 2518 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame"); 2519 break; 2520 case DWC3_DEVICE_EVENT_SOF: 2521 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame"); 2522 break; 2523 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2524 dwc3_trace(trace_dwc3_gadget, "Erratic Error"); 2525 break; 2526 case DWC3_DEVICE_EVENT_CMD_CMPL: 2527 dwc3_trace(trace_dwc3_gadget, "Command Complete"); 2528 break; 2529 case DWC3_DEVICE_EVENT_OVERFLOW: 2530 dwc3_trace(trace_dwc3_gadget, "Overflow"); 2531 break; 2532 default: 2533 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2534 } 2535 } 2536 2537 static void dwc3_process_event_entry(struct dwc3 *dwc, 2538 const union dwc3_event *event) 2539 { 2540 trace_dwc3_event(event->raw); 2541 2542 /* Endpoint IRQ, handle it and return early */ 2543 if (event->type.is_devspec == 0) { 2544 /* depevt */ 2545 return dwc3_endpoint_interrupt(dwc, &event->depevt); 2546 } 2547 2548 switch (event->type.type) { 2549 case DWC3_EVENT_TYPE_DEV: 2550 dwc3_gadget_interrupt(dwc, &event->devt); 2551 break; 2552 /* REVISIT what to do with Carkit and I2C events ? */ 2553 default: 2554 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2555 } 2556 } 2557 2558 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2559 { 2560 struct dwc3_event_buffer *evt; 2561 irqreturn_t ret = IRQ_NONE; 2562 int left; 2563 u32 reg; 2564 2565 evt = dwc->ev_buffs[buf]; 2566 left = evt->count; 2567 2568 if (!(evt->flags & DWC3_EVENT_PENDING)) 2569 return IRQ_NONE; 2570 2571 while (left > 0) { 2572 union dwc3_event event; 2573 2574 event.raw = *(u32 *) (evt->buf + evt->lpos); 2575 2576 dwc3_process_event_entry(dwc, &event); 2577 2578 /* 2579 * FIXME we wrap around correctly to the next entry as 2580 * almost all entries are 4 bytes in size. There is one 2581 * entry which has 12 bytes which is a regular entry 2582 * followed by 8 bytes data. ATM I don't know how 2583 * things are organized if we get next to the a 2584 * boundary so I worry about that once we try to handle 2585 * that. 2586 */ 2587 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2588 left -= 4; 2589 2590 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2591 } 2592 2593 evt->count = 0; 2594 evt->flags &= ~DWC3_EVENT_PENDING; 2595 ret = IRQ_HANDLED; 2596 2597 /* Unmask interrupt */ 2598 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2599 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2600 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2601 2602 return ret; 2603 } 2604 2605 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) 2606 { 2607 struct dwc3 *dwc = _dwc; 2608 unsigned long flags; 2609 irqreturn_t ret = IRQ_NONE; 2610 int i; 2611 2612 spin_lock_irqsave(&dwc->lock, flags); 2613 2614 for (i = 0; i < dwc->num_event_buffers; i++) 2615 ret |= dwc3_process_event_buf(dwc, i); 2616 2617 spin_unlock_irqrestore(&dwc->lock, flags); 2618 2619 return ret; 2620 } 2621 2622 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf) 2623 { 2624 struct dwc3_event_buffer *evt; 2625 u32 count; 2626 u32 reg; 2627 2628 evt = dwc->ev_buffs[buf]; 2629 2630 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2631 count &= DWC3_GEVNTCOUNT_MASK; 2632 if (!count) 2633 return IRQ_NONE; 2634 2635 evt->count = count; 2636 evt->flags |= DWC3_EVENT_PENDING; 2637 2638 /* Mask interrupt */ 2639 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2640 reg |= DWC3_GEVNTSIZ_INTMASK; 2641 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2642 2643 return IRQ_WAKE_THREAD; 2644 } 2645 2646 static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2647 { 2648 struct dwc3 *dwc = _dwc; 2649 int i; 2650 irqreturn_t ret = IRQ_NONE; 2651 2652 spin_lock(&dwc->lock); 2653 2654 for (i = 0; i < dwc->num_event_buffers; i++) { 2655 irqreturn_t status; 2656 2657 status = dwc3_check_event_buf(dwc, i); 2658 if (status == IRQ_WAKE_THREAD) 2659 ret = status; 2660 } 2661 2662 spin_unlock(&dwc->lock); 2663 2664 return ret; 2665 } 2666 2667 /** 2668 * dwc3_gadget_init - Initializes gadget related registers 2669 * @dwc: pointer to our controller context structure 2670 * 2671 * Returns 0 on success otherwise negative errno. 2672 */ 2673 int dwc3_gadget_init(struct dwc3 *dwc) 2674 { 2675 int ret; 2676 2677 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2678 &dwc->ctrl_req_addr, GFP_KERNEL); 2679 if (!dwc->ctrl_req) { 2680 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2681 ret = -ENOMEM; 2682 goto err0; 2683 } 2684 2685 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2686 &dwc->ep0_trb_addr, GFP_KERNEL); 2687 if (!dwc->ep0_trb) { 2688 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2689 ret = -ENOMEM; 2690 goto err1; 2691 } 2692 2693 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2694 if (!dwc->setup_buf) { 2695 ret = -ENOMEM; 2696 goto err2; 2697 } 2698 2699 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2700 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2701 GFP_KERNEL); 2702 if (!dwc->ep0_bounce) { 2703 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2704 ret = -ENOMEM; 2705 goto err3; 2706 } 2707 2708 dwc->gadget.ops = &dwc3_gadget_ops; 2709 dwc->gadget.max_speed = USB_SPEED_SUPER; 2710 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2711 dwc->gadget.sg_supported = true; 2712 dwc->gadget.name = "dwc3-gadget"; 2713 2714 /* 2715 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2716 * on ep out. 2717 */ 2718 dwc->gadget.quirk_ep_out_aligned_size = true; 2719 2720 /* 2721 * REVISIT: Here we should clear all pending IRQs to be 2722 * sure we're starting from a well known location. 2723 */ 2724 2725 ret = dwc3_gadget_init_endpoints(dwc); 2726 if (ret) 2727 goto err4; 2728 2729 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2730 if (ret) { 2731 dev_err(dwc->dev, "failed to register udc\n"); 2732 goto err4; 2733 } 2734 2735 return 0; 2736 2737 err4: 2738 dwc3_gadget_free_endpoints(dwc); 2739 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2740 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2741 2742 err3: 2743 kfree(dwc->setup_buf); 2744 2745 err2: 2746 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2747 dwc->ep0_trb, dwc->ep0_trb_addr); 2748 2749 err1: 2750 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2751 dwc->ctrl_req, dwc->ctrl_req_addr); 2752 2753 err0: 2754 return ret; 2755 } 2756 2757 /* -------------------------------------------------------------------------- */ 2758 2759 void dwc3_gadget_exit(struct dwc3 *dwc) 2760 { 2761 usb_del_gadget_udc(&dwc->gadget); 2762 2763 dwc3_gadget_free_endpoints(dwc); 2764 2765 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2766 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2767 2768 kfree(dwc->setup_buf); 2769 2770 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2771 dwc->ep0_trb, dwc->ep0_trb_addr); 2772 2773 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2774 dwc->ctrl_req, dwc->ctrl_req_addr); 2775 } 2776 2777 int dwc3_gadget_suspend(struct dwc3 *dwc) 2778 { 2779 if (dwc->pullups_connected) { 2780 dwc3_gadget_disable_irq(dwc); 2781 dwc3_gadget_run_stop(dwc, true, true); 2782 } 2783 2784 __dwc3_gadget_ep_disable(dwc->eps[0]); 2785 __dwc3_gadget_ep_disable(dwc->eps[1]); 2786 2787 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG); 2788 2789 return 0; 2790 } 2791 2792 int dwc3_gadget_resume(struct dwc3 *dwc) 2793 { 2794 struct dwc3_ep *dep; 2795 int ret; 2796 2797 /* Start with SuperSpeed Default */ 2798 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2799 2800 dep = dwc->eps[0]; 2801 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2802 false); 2803 if (ret) 2804 goto err0; 2805 2806 dep = dwc->eps[1]; 2807 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2808 false); 2809 if (ret) 2810 goto err1; 2811 2812 /* begin to receive SETUP packets */ 2813 dwc->ep0state = EP0_SETUP_PHASE; 2814 dwc3_ep0_out_start(dwc); 2815 2816 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg); 2817 2818 if (dwc->pullups_connected) { 2819 dwc3_gadget_enable_irq(dwc); 2820 dwc3_gadget_run_stop(dwc, true, false); 2821 } 2822 2823 return 0; 2824 2825 err1: 2826 __dwc3_gadget_ep_disable(dwc->eps[0]); 2827 2828 err0: 2829 return ret; 2830 } 2831