1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
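	 * The polling loop below reads DSTS and waits for DSTS.DCNRD to
	 * clear, retrying up to 10000 times with a 5us delay in between
	 * (roughly 50ms worst case).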
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dwc3_trace(trace_dwc3_gadget,
			"link state change request timed out");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in the coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In an ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
		int mult = 1;
		int tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast.
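		 * As a rough, hypothetical example (8-byte/64-bit master bus):
		 * a 1024-byte bulk endpoint works out to
		 * DIV_ROUND_UP(3 * (1024 + 8) + 8, 8) = 388 MDWIDTH-words in
		 * the calculation below.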
		 * So we're giving those endpoints a fifo_size which is enough
		 * for 3 full packets.
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;
	int i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB
			 * we just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
					DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);
	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32 timeout = 500;
	u32 reg;

	trace_dwc3_gadget_generic_cmd(cmd, param);

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Complete --> %d",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Timed Out");
			return -ETIMEDOUT;
		}
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	trace_dwc3_gadget_ep_cmd(dep, cmd, params);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Complete --> %d",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
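		 * Instead we busy-wait, bounded by 500 iterations of
		 * udelay(1).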
337 */ 338 timeout--; 339 if (!timeout) { 340 dwc3_trace(trace_dwc3_gadget, 341 "Command Timed Out"); 342 return -ETIMEDOUT; 343 } 344 345 udelay(1); 346 } while (1); 347 } 348 349 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 350 struct dwc3_trb *trb) 351 { 352 u32 offset = (char *) trb - (char *) dep->trb_pool; 353 354 return dep->trb_pool_dma + offset; 355 } 356 357 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 358 { 359 struct dwc3 *dwc = dep->dwc; 360 361 if (dep->trb_pool) 362 return 0; 363 364 dep->trb_pool = dma_alloc_coherent(dwc->dev, 365 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 366 &dep->trb_pool_dma, GFP_KERNEL); 367 if (!dep->trb_pool) { 368 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 369 dep->name); 370 return -ENOMEM; 371 } 372 373 return 0; 374 } 375 376 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 377 { 378 struct dwc3 *dwc = dep->dwc; 379 380 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 381 dep->trb_pool, dep->trb_pool_dma); 382 383 dep->trb_pool = NULL; 384 dep->trb_pool_dma = 0; 385 } 386 387 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 388 { 389 struct dwc3_gadget_ep_cmd_params params; 390 u32 cmd; 391 392 memset(¶ms, 0x00, sizeof(params)); 393 394 if (dep->number != 1) { 395 cmd = DWC3_DEPCMD_DEPSTARTCFG; 396 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 397 if (dep->number > 1) { 398 if (dwc->start_config_issued) 399 return 0; 400 dwc->start_config_issued = true; 401 cmd |= DWC3_DEPCMD_PARAM(2); 402 } 403 404 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 405 } 406 407 return 0; 408 } 409 410 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 411 const struct usb_endpoint_descriptor *desc, 412 const struct usb_ss_ep_comp_descriptor *comp_desc, 413 bool ignore, bool restore) 414 { 415 struct dwc3_gadget_ep_cmd_params params; 416 417 memset(¶ms, 0x00, sizeof(params)); 418 419 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 420 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 421 422 /* Burst size is only needed in SuperSpeed mode */ 423 if (dwc->gadget.speed == USB_SPEED_SUPER) { 424 u32 burst = dep->endpoint.maxburst - 1; 425 426 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 427 } 428 429 if (ignore) 430 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 431 432 if (restore) { 433 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 434 params.param2 |= dep->saved_state; 435 } 436 437 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 438 | DWC3_DEPCFG_XFER_NOT_READY_EN; 439 440 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 441 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 442 | DWC3_DEPCFG_STREAM_EVENT_EN; 443 dep->stream_capable = true; 444 } 445 446 if (!usb_endpoint_xfer_control(desc)) 447 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 448 449 /* 450 * We are doing 1:1 mapping for endpoints, meaning 451 * Physical Endpoints 2 maps to Logical Endpoint 2 and 452 * so on. We consider the direction bit as part of the physical 453 * endpoint number. So USB endpoint 0x81 is 0x03. 
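	 * In other words: physical number = (USB endpoint number << 1) |
	 * direction, so e.g. USB endpoint 0x82 (IN) is physical endpoint 5.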
454 */ 455 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 456 457 /* 458 * We must use the lower 16 TX FIFOs even though 459 * HW might have more 460 */ 461 if (dep->direction) 462 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 463 464 if (desc->bInterval) { 465 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 466 dep->interval = 1 << (desc->bInterval - 1); 467 } 468 469 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 470 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 471 } 472 473 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 474 { 475 struct dwc3_gadget_ep_cmd_params params; 476 477 memset(¶ms, 0x00, sizeof(params)); 478 479 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 480 481 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 482 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 483 } 484 485 /** 486 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 487 * @dep: endpoint to be initialized 488 * @desc: USB Endpoint Descriptor 489 * 490 * Caller should take care of locking 491 */ 492 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 493 const struct usb_endpoint_descriptor *desc, 494 const struct usb_ss_ep_comp_descriptor *comp_desc, 495 bool ignore, bool restore) 496 { 497 struct dwc3 *dwc = dep->dwc; 498 u32 reg; 499 int ret; 500 501 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); 502 503 if (!(dep->flags & DWC3_EP_ENABLED)) { 504 ret = dwc3_gadget_start_config(dwc, dep); 505 if (ret) 506 return ret; 507 } 508 509 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 510 restore); 511 if (ret) 512 return ret; 513 514 if (!(dep->flags & DWC3_EP_ENABLED)) { 515 struct dwc3_trb *trb_st_hw; 516 struct dwc3_trb *trb_link; 517 518 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 519 if (ret) 520 return ret; 521 522 dep->endpoint.desc = desc; 523 dep->comp_desc = comp_desc; 524 dep->type = usb_endpoint_type(desc); 525 dep->flags |= DWC3_EP_ENABLED; 526 527 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 528 reg |= DWC3_DALEPENA_EP(dep->number); 529 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 530 531 if (!usb_endpoint_xfer_isoc(desc)) 532 return 0; 533 534 /* Link TRB for ISOC. The HWO bit is never reset */ 535 trb_st_hw = &dep->trb_pool[0]; 536 537 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 538 memset(trb_link, 0, sizeof(*trb_link)); 539 540 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 541 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 542 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 543 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 544 } 545 546 return 0; 547 } 548 549 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 550 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 551 { 552 struct dwc3_request *req; 553 554 if (!list_empty(&dep->req_queued)) { 555 dwc3_stop_active_transfer(dwc, dep->number, true); 556 557 /* - giveback all requests to gadget driver */ 558 while (!list_empty(&dep->req_queued)) { 559 req = next_request(&dep->req_queued); 560 561 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 562 } 563 } 564 565 while (!list_empty(&dep->request_list)) { 566 req = next_request(&dep->request_list); 567 568 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 569 } 570 } 571 572 /** 573 * __dwc3_gadget_ep_disable - Disables a HW endpoint 574 * @dep: the endpoint to disable 575 * 576 * This function also removes requests which are currently processed ny the 577 * hardware and those which are not yet scheduled. 578 * Caller should take care of locking. 
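 * All removed requests are given back to the gadget driver with
 * -ESHUTDOWN status.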
579 */ 580 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 581 { 582 struct dwc3 *dwc = dep->dwc; 583 u32 reg; 584 585 dwc3_remove_requests(dwc, dep); 586 587 /* make sure HW endpoint isn't stalled */ 588 if (dep->flags & DWC3_EP_STALL) 589 __dwc3_gadget_ep_set_halt(dep, 0, false); 590 591 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 592 reg &= ~DWC3_DALEPENA_EP(dep->number); 593 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 594 595 dep->stream_capable = false; 596 dep->endpoint.desc = NULL; 597 dep->comp_desc = NULL; 598 dep->type = 0; 599 dep->flags = 0; 600 601 return 0; 602 } 603 604 /* -------------------------------------------------------------------------- */ 605 606 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 607 const struct usb_endpoint_descriptor *desc) 608 { 609 return -EINVAL; 610 } 611 612 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 613 { 614 return -EINVAL; 615 } 616 617 /* -------------------------------------------------------------------------- */ 618 619 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 620 const struct usb_endpoint_descriptor *desc) 621 { 622 struct dwc3_ep *dep; 623 struct dwc3 *dwc; 624 unsigned long flags; 625 int ret; 626 627 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 628 pr_debug("dwc3: invalid parameters\n"); 629 return -EINVAL; 630 } 631 632 if (!desc->wMaxPacketSize) { 633 pr_debug("dwc3: missing wMaxPacketSize\n"); 634 return -EINVAL; 635 } 636 637 dep = to_dwc3_ep(ep); 638 dwc = dep->dwc; 639 640 if (dep->flags & DWC3_EP_ENABLED) { 641 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", 642 dep->name); 643 return 0; 644 } 645 646 switch (usb_endpoint_type(desc)) { 647 case USB_ENDPOINT_XFER_CONTROL: 648 strlcat(dep->name, "-control", sizeof(dep->name)); 649 break; 650 case USB_ENDPOINT_XFER_ISOC: 651 strlcat(dep->name, "-isoc", sizeof(dep->name)); 652 break; 653 case USB_ENDPOINT_XFER_BULK: 654 strlcat(dep->name, "-bulk", sizeof(dep->name)); 655 break; 656 case USB_ENDPOINT_XFER_INT: 657 strlcat(dep->name, "-int", sizeof(dep->name)); 658 break; 659 default: 660 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 661 } 662 663 spin_lock_irqsave(&dwc->lock, flags); 664 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 665 spin_unlock_irqrestore(&dwc->lock, flags); 666 667 return ret; 668 } 669 670 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 671 { 672 struct dwc3_ep *dep; 673 struct dwc3 *dwc; 674 unsigned long flags; 675 int ret; 676 677 if (!ep) { 678 pr_debug("dwc3: invalid parameters\n"); 679 return -EINVAL; 680 } 681 682 dep = to_dwc3_ep(ep); 683 dwc = dep->dwc; 684 685 if (!(dep->flags & DWC3_EP_ENABLED)) { 686 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", 687 dep->name); 688 return 0; 689 } 690 691 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 692 dep->number >> 1, 693 (dep->number & 1) ? 
"in" : "out"); 694 695 spin_lock_irqsave(&dwc->lock, flags); 696 ret = __dwc3_gadget_ep_disable(dep); 697 spin_unlock_irqrestore(&dwc->lock, flags); 698 699 return ret; 700 } 701 702 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 703 gfp_t gfp_flags) 704 { 705 struct dwc3_request *req; 706 struct dwc3_ep *dep = to_dwc3_ep(ep); 707 708 req = kzalloc(sizeof(*req), gfp_flags); 709 if (!req) 710 return NULL; 711 712 req->epnum = dep->number; 713 req->dep = dep; 714 715 trace_dwc3_alloc_request(req); 716 717 return &req->request; 718 } 719 720 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 721 struct usb_request *request) 722 { 723 struct dwc3_request *req = to_dwc3_request(request); 724 725 trace_dwc3_free_request(req); 726 kfree(req); 727 } 728 729 /** 730 * dwc3_prepare_one_trb - setup one TRB from one request 731 * @dep: endpoint for which this request is prepared 732 * @req: dwc3_request pointer 733 */ 734 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 735 struct dwc3_request *req, dma_addr_t dma, 736 unsigned length, unsigned last, unsigned chain, unsigned node) 737 { 738 struct dwc3_trb *trb; 739 740 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s", 741 dep->name, req, (unsigned long long) dma, 742 length, last ? " last" : "", 743 chain ? " chain" : ""); 744 745 746 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 747 748 if (!req->trb) { 749 dwc3_gadget_move_request_queued(req); 750 req->trb = trb; 751 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 752 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 753 } 754 755 dep->free_slot++; 756 /* Skip the LINK-TRB on ISOC */ 757 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 758 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 759 dep->free_slot++; 760 761 trb->size = DWC3_TRB_SIZE_LENGTH(length); 762 trb->bpl = lower_32_bits(dma); 763 trb->bph = upper_32_bits(dma); 764 765 switch (usb_endpoint_type(dep->endpoint.desc)) { 766 case USB_ENDPOINT_XFER_CONTROL: 767 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 768 break; 769 770 case USB_ENDPOINT_XFER_ISOC: 771 if (!node) 772 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 773 else 774 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 775 break; 776 777 case USB_ENDPOINT_XFER_BULK: 778 case USB_ENDPOINT_XFER_INT: 779 trb->ctrl = DWC3_TRBCTL_NORMAL; 780 break; 781 default: 782 /* 783 * This is only possible with faulty memory because we 784 * checked it already :) 785 */ 786 BUG(); 787 } 788 789 if (!req->request.no_interrupt && !chain) 790 trb->ctrl |= DWC3_TRB_CTRL_IOC; 791 792 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 793 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 794 trb->ctrl |= DWC3_TRB_CTRL_CSP; 795 } else if (last) { 796 trb->ctrl |= DWC3_TRB_CTRL_LST; 797 } 798 799 if (chain) 800 trb->ctrl |= DWC3_TRB_CTRL_CHN; 801 802 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 803 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 804 805 trb->ctrl |= DWC3_TRB_CTRL_HWO; 806 807 trace_dwc3_prepare_trb(dep, trb); 808 } 809 810 /* 811 * dwc3_prepare_trbs - setup TRBs from requests 812 * @dep: endpoint for which requests are being prepared 813 * @starting: true if the endpoint is idle and no requests are queued. 814 * 815 * The function goes through the requests list and sets up TRBs for the 816 * transfers. The function returns once there are no more TRBs available or 817 * it runs out of requests. 
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	u32 max;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If the busy and free slots are equal then the ring is either full
	 * or empty. If we are starting to process requests then we are
	 * empty. Otherwise we are full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_empty(&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}

			if (last_one)
				break;
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request?
*/ 920 if (list_is_last(&req->list, &dep->request_list)) 921 last_one = 1; 922 923 dwc3_prepare_one_trb(dep, req, dma, length, 924 last_one, false, 0); 925 926 if (last_one) 927 break; 928 } 929 } 930 } 931 932 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 933 int start_new) 934 { 935 struct dwc3_gadget_ep_cmd_params params; 936 struct dwc3_request *req; 937 struct dwc3 *dwc = dep->dwc; 938 int ret; 939 u32 cmd; 940 941 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 942 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name); 943 return -EBUSY; 944 } 945 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 946 947 /* 948 * If we are getting here after a short-out-packet we don't enqueue any 949 * new requests as we try to set the IOC bit only on the last request. 950 */ 951 if (start_new) { 952 if (list_empty(&dep->req_queued)) 953 dwc3_prepare_trbs(dep, start_new); 954 955 /* req points to the first request which will be sent */ 956 req = next_request(&dep->req_queued); 957 } else { 958 dwc3_prepare_trbs(dep, start_new); 959 960 /* 961 * req points to the first request where HWO changed from 0 to 1 962 */ 963 req = next_request(&dep->req_queued); 964 } 965 if (!req) { 966 dep->flags |= DWC3_EP_PENDING_REQUEST; 967 return 0; 968 } 969 970 memset(¶ms, 0, sizeof(params)); 971 972 if (start_new) { 973 params.param0 = upper_32_bits(req->trb_dma); 974 params.param1 = lower_32_bits(req->trb_dma); 975 cmd = DWC3_DEPCMD_STARTTRANSFER; 976 } else { 977 cmd = DWC3_DEPCMD_UPDATETRANSFER; 978 } 979 980 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 981 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 982 if (ret < 0) { 983 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); 984 985 /* 986 * FIXME we need to iterate over the list of requests 987 * here and stop, unmap, free and del each of the linked 988 * requests instead of what we do now. 989 */ 990 usb_gadget_unmap_request(&dwc->gadget, &req->request, 991 req->direction); 992 list_del(&req->list); 993 return ret; 994 } 995 996 dep->flags |= DWC3_EP_BUSY; 997 998 if (start_new) { 999 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 1000 dep->number); 1001 WARN_ON_ONCE(!dep->resource_index); 1002 } 1003 1004 return 0; 1005 } 1006 1007 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1008 struct dwc3_ep *dep, u32 cur_uf) 1009 { 1010 u32 uf; 1011 1012 if (list_empty(&dep->request_list)) { 1013 dwc3_trace(trace_dwc3_gadget, 1014 "ISOC ep %s run out for requests", 1015 dep->name); 1016 dep->flags |= DWC3_EP_PENDING_REQUEST; 1017 return; 1018 } 1019 1020 /* 4 micro frames in the future */ 1021 uf = cur_uf + dep->interval * 4; 1022 1023 __dwc3_gadget_kick_transfer(dep, uf, 1); 1024 } 1025 1026 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1027 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1028 { 1029 u32 cur_uf, mask; 1030 1031 mask = ~(dep->interval - 1); 1032 cur_uf = event->parameters & mask; 1033 1034 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1035 } 1036 1037 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1038 { 1039 struct dwc3 *dwc = dep->dwc; 1040 int ret; 1041 1042 req->request.actual = 0; 1043 req->request.status = -EINPROGRESS; 1044 req->direction = dep->direction; 1045 req->epnum = dep->number; 1046 1047 /* 1048 * We only add to our list of requests now and 1049 * start consuming the list once we get XferNotReady 1050 * IRQ. 
1051 * 1052 * That way, we avoid doing anything that we don't need 1053 * to do now and defer it until the point we receive a 1054 * particular token from the Host side. 1055 * 1056 * This will also avoid Host cancelling URBs due to too 1057 * many NAKs. 1058 */ 1059 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1060 dep->direction); 1061 if (ret) 1062 return ret; 1063 1064 list_add_tail(&req->list, &dep->request_list); 1065 1066 /* 1067 * There are a few special cases: 1068 * 1069 * 1. XferNotReady with empty list of requests. We need to kick the 1070 * transfer here in that situation, otherwise we will be NAKing 1071 * forever. If we get XferNotReady before gadget driver has a 1072 * chance to queue a request, we will ACK the IRQ but won't be 1073 * able to receive the data until the next request is queued. 1074 * The following code is handling exactly that. 1075 * 1076 */ 1077 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1078 /* 1079 * If xfernotready is already elapsed and it is a case 1080 * of isoc transfer, then issue END TRANSFER, so that 1081 * you can receive xfernotready again and can have 1082 * notion of current microframe. 1083 */ 1084 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1085 if (list_empty(&dep->req_queued)) { 1086 dwc3_stop_active_transfer(dwc, dep->number, true); 1087 dep->flags = DWC3_EP_ENABLED; 1088 } 1089 return 0; 1090 } 1091 1092 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1093 if (ret && ret != -EBUSY) 1094 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1095 dep->name); 1096 return ret; 1097 } 1098 1099 /* 1100 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1101 * kick the transfer here after queuing a request, otherwise the 1102 * core may not see the modified TRB(s). 1103 */ 1104 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1105 (dep->flags & DWC3_EP_BUSY) && 1106 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1107 WARN_ON_ONCE(!dep->resource_index); 1108 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1109 false); 1110 if (ret && ret != -EBUSY) 1111 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1112 dep->name); 1113 return ret; 1114 } 1115 1116 /* 1117 * 4. Stream Capable Bulk Endpoints. We need to start the transfer 1118 * right away, otherwise host will not know we have streams to be 1119 * handled. 
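	 * (The stream ID programmed into each TRB comes from
	 * req->request.stream_id; see dwc3_prepare_one_trb().)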
1120 */ 1121 if (dep->stream_capable) { 1122 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1123 if (ret && ret != -EBUSY) 1124 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1125 dep->name); 1126 } 1127 1128 return 0; 1129 } 1130 1131 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1132 gfp_t gfp_flags) 1133 { 1134 struct dwc3_request *req = to_dwc3_request(request); 1135 struct dwc3_ep *dep = to_dwc3_ep(ep); 1136 struct dwc3 *dwc = dep->dwc; 1137 1138 unsigned long flags; 1139 1140 int ret; 1141 1142 spin_lock_irqsave(&dwc->lock, flags); 1143 if (!dep->endpoint.desc) { 1144 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", 1145 request, ep->name); 1146 ret = -ESHUTDOWN; 1147 goto out; 1148 } 1149 1150 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1151 request, req->dep->name)) { 1152 ret = -EINVAL; 1153 goto out; 1154 } 1155 1156 trace_dwc3_ep_queue(req); 1157 1158 ret = __dwc3_gadget_ep_queue(dep, req); 1159 1160 out: 1161 spin_unlock_irqrestore(&dwc->lock, flags); 1162 1163 return ret; 1164 } 1165 1166 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1167 struct usb_request *request) 1168 { 1169 struct dwc3_request *req = to_dwc3_request(request); 1170 struct dwc3_request *r = NULL; 1171 1172 struct dwc3_ep *dep = to_dwc3_ep(ep); 1173 struct dwc3 *dwc = dep->dwc; 1174 1175 unsigned long flags; 1176 int ret = 0; 1177 1178 trace_dwc3_ep_dequeue(req); 1179 1180 spin_lock_irqsave(&dwc->lock, flags); 1181 1182 list_for_each_entry(r, &dep->request_list, list) { 1183 if (r == req) 1184 break; 1185 } 1186 1187 if (r != req) { 1188 list_for_each_entry(r, &dep->req_queued, list) { 1189 if (r == req) 1190 break; 1191 } 1192 if (r == req) { 1193 /* wait until it is processed */ 1194 dwc3_stop_active_transfer(dwc, dep->number, true); 1195 goto out1; 1196 } 1197 dev_err(dwc->dev, "request %p was not queued to %s\n", 1198 request, ep->name); 1199 ret = -EINVAL; 1200 goto out0; 1201 } 1202 1203 out1: 1204 /* giveback the request */ 1205 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1206 1207 out0: 1208 spin_unlock_irqrestore(&dwc->lock, flags); 1209 1210 return ret; 1211 } 1212 1213 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1214 { 1215 struct dwc3_gadget_ep_cmd_params params; 1216 struct dwc3 *dwc = dep->dwc; 1217 int ret; 1218 1219 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1220 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1221 return -EINVAL; 1222 } 1223 1224 memset(¶ms, 0x00, sizeof(params)); 1225 1226 if (value) { 1227 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1228 (!list_empty(&dep->req_queued) || 1229 !list_empty(&dep->request_list)))) { 1230 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", 1231 dep->name); 1232 return -EAGAIN; 1233 } 1234 1235 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1236 DWC3_DEPCMD_SETSTALL, ¶ms); 1237 if (ret) 1238 dev_err(dwc->dev, "failed to set STALL on %s\n", 1239 dep->name); 1240 else 1241 dep->flags |= DWC3_EP_STALL; 1242 } else { 1243 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1244 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1245 if (ret) 1246 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1247 dep->name); 1248 else 1249 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1250 } 1251 1252 return ret; 1253 } 1254 1255 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1256 { 1257 struct dwc3_ep *dep = to_dwc3_ep(ep); 1258 struct dwc3 *dwc = dep->dwc; 1259 1260 unsigned long flags; 1261 1262 int ret; 
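	/*
	 * This is the usb_ep_ops ->set_halt() entry point: 'protocol' is
	 * false here, so __dwc3_gadget_ep_set_halt() will refuse to halt an
	 * endpoint that still has requests pending and return -EAGAIN.
	 */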
1263 1264 spin_lock_irqsave(&dwc->lock, flags); 1265 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1266 spin_unlock_irqrestore(&dwc->lock, flags); 1267 1268 return ret; 1269 } 1270 1271 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1272 { 1273 struct dwc3_ep *dep = to_dwc3_ep(ep); 1274 struct dwc3 *dwc = dep->dwc; 1275 unsigned long flags; 1276 int ret; 1277 1278 spin_lock_irqsave(&dwc->lock, flags); 1279 dep->flags |= DWC3_EP_WEDGE; 1280 1281 if (dep->number == 0 || dep->number == 1) 1282 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1283 else 1284 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1285 spin_unlock_irqrestore(&dwc->lock, flags); 1286 1287 return ret; 1288 } 1289 1290 /* -------------------------------------------------------------------------- */ 1291 1292 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1293 .bLength = USB_DT_ENDPOINT_SIZE, 1294 .bDescriptorType = USB_DT_ENDPOINT, 1295 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1296 }; 1297 1298 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1299 .enable = dwc3_gadget_ep0_enable, 1300 .disable = dwc3_gadget_ep0_disable, 1301 .alloc_request = dwc3_gadget_ep_alloc_request, 1302 .free_request = dwc3_gadget_ep_free_request, 1303 .queue = dwc3_gadget_ep0_queue, 1304 .dequeue = dwc3_gadget_ep_dequeue, 1305 .set_halt = dwc3_gadget_ep0_set_halt, 1306 .set_wedge = dwc3_gadget_ep_set_wedge, 1307 }; 1308 1309 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1310 .enable = dwc3_gadget_ep_enable, 1311 .disable = dwc3_gadget_ep_disable, 1312 .alloc_request = dwc3_gadget_ep_alloc_request, 1313 .free_request = dwc3_gadget_ep_free_request, 1314 .queue = dwc3_gadget_ep_queue, 1315 .dequeue = dwc3_gadget_ep_dequeue, 1316 .set_halt = dwc3_gadget_ep_set_halt, 1317 .set_wedge = dwc3_gadget_ep_set_wedge, 1318 }; 1319 1320 /* -------------------------------------------------------------------------- */ 1321 1322 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1323 { 1324 struct dwc3 *dwc = gadget_to_dwc(g); 1325 u32 reg; 1326 1327 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1328 return DWC3_DSTS_SOFFN(reg); 1329 } 1330 1331 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1332 { 1333 struct dwc3 *dwc = gadget_to_dwc(g); 1334 1335 unsigned long timeout; 1336 unsigned long flags; 1337 1338 u32 reg; 1339 1340 int ret = 0; 1341 1342 u8 link_state; 1343 u8 speed; 1344 1345 spin_lock_irqsave(&dwc->lock, flags); 1346 1347 /* 1348 * According to the Databook Remote wakeup request should 1349 * be issued only when the device is in early suspend state. 1350 * 1351 * We can check that via USB Link State bits in DSTS register. 
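	 * In practice this means DSTS.USBLNKST must read RX_DET or U3; the
	 * switch statement below rejects any other link state.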
1352 */ 1353 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1354 1355 speed = reg & DWC3_DSTS_CONNECTSPD; 1356 if (speed == DWC3_DSTS_SUPERSPEED) { 1357 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); 1358 ret = -EINVAL; 1359 goto out; 1360 } 1361 1362 link_state = DWC3_DSTS_USBLNKST(reg); 1363 1364 switch (link_state) { 1365 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1366 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1367 break; 1368 default: 1369 dev_dbg(dwc->dev, "can't wakeup from link state %d\n", 1370 link_state); 1371 ret = -EINVAL; 1372 goto out; 1373 } 1374 1375 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1376 if (ret < 0) { 1377 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1378 goto out; 1379 } 1380 1381 /* Recent versions do this automatically */ 1382 if (dwc->revision < DWC3_REVISION_194A) { 1383 /* write zeroes to Link Change Request */ 1384 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1385 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1386 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1387 } 1388 1389 /* poll until Link State changes to ON */ 1390 timeout = jiffies + msecs_to_jiffies(100); 1391 1392 while (!time_after(jiffies, timeout)) { 1393 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1394 1395 /* in HS, means ON */ 1396 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1397 break; 1398 } 1399 1400 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1401 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1402 ret = -EINVAL; 1403 } 1404 1405 out: 1406 spin_unlock_irqrestore(&dwc->lock, flags); 1407 1408 return ret; 1409 } 1410 1411 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1412 int is_selfpowered) 1413 { 1414 struct dwc3 *dwc = gadget_to_dwc(g); 1415 unsigned long flags; 1416 1417 spin_lock_irqsave(&dwc->lock, flags); 1418 g->is_selfpowered = !!is_selfpowered; 1419 spin_unlock_irqrestore(&dwc->lock, flags); 1420 1421 return 0; 1422 } 1423 1424 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1425 { 1426 u32 reg; 1427 u32 timeout = 500; 1428 1429 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1430 if (is_on) { 1431 if (dwc->revision <= DWC3_REVISION_187A) { 1432 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1433 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1434 } 1435 1436 if (dwc->revision >= DWC3_REVISION_194A) 1437 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1438 reg |= DWC3_DCTL_RUN_STOP; 1439 1440 if (dwc->has_hibernation) 1441 reg |= DWC3_DCTL_KEEP_CONNECT; 1442 1443 dwc->pullups_connected = true; 1444 } else { 1445 reg &= ~DWC3_DCTL_RUN_STOP; 1446 1447 if (dwc->has_hibernation && !suspend) 1448 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1449 1450 dwc->pullups_connected = false; 1451 } 1452 1453 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1454 1455 do { 1456 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1457 if (is_on) { 1458 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1459 break; 1460 } else { 1461 if (reg & DWC3_DSTS_DEVCTRLHLT) 1462 break; 1463 } 1464 timeout--; 1465 if (!timeout) 1466 return -ETIMEDOUT; 1467 udelay(1); 1468 } while (1); 1469 1470 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1471 dwc->gadget_driver 1472 ? dwc->gadget_driver->function : "no-function", 1473 is_on ? 
"connect" : "disconnect"); 1474 1475 return 0; 1476 } 1477 1478 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1479 { 1480 struct dwc3 *dwc = gadget_to_dwc(g); 1481 unsigned long flags; 1482 int ret; 1483 1484 is_on = !!is_on; 1485 1486 spin_lock_irqsave(&dwc->lock, flags); 1487 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1488 spin_unlock_irqrestore(&dwc->lock, flags); 1489 1490 return ret; 1491 } 1492 1493 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1494 { 1495 u32 reg; 1496 1497 /* Enable all but Start and End of Frame IRQs */ 1498 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1499 DWC3_DEVTEN_EVNTOVERFLOWEN | 1500 DWC3_DEVTEN_CMDCMPLTEN | 1501 DWC3_DEVTEN_ERRTICERREN | 1502 DWC3_DEVTEN_WKUPEVTEN | 1503 DWC3_DEVTEN_ULSTCNGEN | 1504 DWC3_DEVTEN_CONNECTDONEEN | 1505 DWC3_DEVTEN_USBRSTEN | 1506 DWC3_DEVTEN_DISCONNEVTEN); 1507 1508 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1509 } 1510 1511 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1512 { 1513 /* mask all interrupts */ 1514 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1515 } 1516 1517 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1518 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1519 1520 static int dwc3_gadget_start(struct usb_gadget *g, 1521 struct usb_gadget_driver *driver) 1522 { 1523 struct dwc3 *dwc = gadget_to_dwc(g); 1524 struct dwc3_ep *dep; 1525 unsigned long flags; 1526 int ret = 0; 1527 int irq; 1528 u32 reg; 1529 1530 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1531 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1532 IRQF_SHARED, "dwc3", dwc); 1533 if (ret) { 1534 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1535 irq, ret); 1536 goto err0; 1537 } 1538 1539 spin_lock_irqsave(&dwc->lock, flags); 1540 1541 if (dwc->gadget_driver) { 1542 dev_err(dwc->dev, "%s is already bound to %s\n", 1543 dwc->gadget.name, 1544 dwc->gadget_driver->driver.name); 1545 ret = -EBUSY; 1546 goto err1; 1547 } 1548 1549 dwc->gadget_driver = driver; 1550 1551 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1552 reg &= ~(DWC3_DCFG_SPEED_MASK); 1553 1554 /** 1555 * WORKAROUND: DWC3 revision < 2.20a have an issue 1556 * which would cause metastability state on Run/Stop 1557 * bit if we try to force the IP to USB2-only mode. 
1558 * 1559 * Because of that, we cannot configure the IP to any 1560 * speed other than the SuperSpeed 1561 * 1562 * Refers to: 1563 * 1564 * STAR#9000525659: Clock Domain Crossing on DCTL in 1565 * USB 2.0 Mode 1566 */ 1567 if (dwc->revision < DWC3_REVISION_220A) { 1568 reg |= DWC3_DCFG_SUPERSPEED; 1569 } else { 1570 switch (dwc->maximum_speed) { 1571 case USB_SPEED_LOW: 1572 reg |= DWC3_DSTS_LOWSPEED; 1573 break; 1574 case USB_SPEED_FULL: 1575 reg |= DWC3_DSTS_FULLSPEED1; 1576 break; 1577 case USB_SPEED_HIGH: 1578 reg |= DWC3_DSTS_HIGHSPEED; 1579 break; 1580 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1581 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1582 default: 1583 reg |= DWC3_DSTS_SUPERSPEED; 1584 } 1585 } 1586 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1587 1588 dwc->start_config_issued = false; 1589 1590 /* Start with SuperSpeed Default */ 1591 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1592 1593 dep = dwc->eps[0]; 1594 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1595 false); 1596 if (ret) { 1597 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1598 goto err2; 1599 } 1600 1601 dep = dwc->eps[1]; 1602 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1603 false); 1604 if (ret) { 1605 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1606 goto err3; 1607 } 1608 1609 /* begin to receive SETUP packets */ 1610 dwc->ep0state = EP0_SETUP_PHASE; 1611 dwc3_ep0_out_start(dwc); 1612 1613 dwc3_gadget_enable_irq(dwc); 1614 1615 spin_unlock_irqrestore(&dwc->lock, flags); 1616 1617 return 0; 1618 1619 err3: 1620 __dwc3_gadget_ep_disable(dwc->eps[0]); 1621 1622 err2: 1623 dwc->gadget_driver = NULL; 1624 1625 err1: 1626 spin_unlock_irqrestore(&dwc->lock, flags); 1627 1628 free_irq(irq, dwc); 1629 1630 err0: 1631 return ret; 1632 } 1633 1634 static int dwc3_gadget_stop(struct usb_gadget *g) 1635 { 1636 struct dwc3 *dwc = gadget_to_dwc(g); 1637 unsigned long flags; 1638 int irq; 1639 1640 spin_lock_irqsave(&dwc->lock, flags); 1641 1642 dwc3_gadget_disable_irq(dwc); 1643 __dwc3_gadget_ep_disable(dwc->eps[0]); 1644 __dwc3_gadget_ep_disable(dwc->eps[1]); 1645 1646 dwc->gadget_driver = NULL; 1647 1648 spin_unlock_irqrestore(&dwc->lock, flags); 1649 1650 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1651 free_irq(irq, dwc); 1652 1653 return 0; 1654 } 1655 1656 static const struct usb_gadget_ops dwc3_gadget_ops = { 1657 .get_frame = dwc3_gadget_get_frame, 1658 .wakeup = dwc3_gadget_wakeup, 1659 .set_selfpowered = dwc3_gadget_set_selfpowered, 1660 .pullup = dwc3_gadget_pullup, 1661 .udc_start = dwc3_gadget_start, 1662 .udc_stop = dwc3_gadget_stop, 1663 }; 1664 1665 /* -------------------------------------------------------------------------- */ 1666 1667 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1668 u8 num, u32 direction) 1669 { 1670 struct dwc3_ep *dep; 1671 u8 i; 1672 1673 for (i = 0; i < num; i++) { 1674 u8 epnum = (i << 1) | (!!direction); 1675 1676 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1677 if (!dep) 1678 return -ENOMEM; 1679 1680 dep->dwc = dwc; 1681 dep->number = epnum; 1682 dep->direction = !!direction; 1683 dwc->eps[epnum] = dep; 1684 1685 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1686 (epnum & 1) ? 
"in" : "out"); 1687 1688 dep->endpoint.name = dep->name; 1689 1690 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1691 1692 if (epnum == 0 || epnum == 1) { 1693 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1694 dep->endpoint.maxburst = 1; 1695 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1696 if (!epnum) 1697 dwc->gadget.ep0 = &dep->endpoint; 1698 } else { 1699 int ret; 1700 1701 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1702 dep->endpoint.max_streams = 15; 1703 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1704 list_add_tail(&dep->endpoint.ep_list, 1705 &dwc->gadget.ep_list); 1706 1707 ret = dwc3_alloc_trb_pool(dep); 1708 if (ret) 1709 return ret; 1710 } 1711 1712 INIT_LIST_HEAD(&dep->request_list); 1713 INIT_LIST_HEAD(&dep->req_queued); 1714 } 1715 1716 return 0; 1717 } 1718 1719 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1720 { 1721 int ret; 1722 1723 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1724 1725 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1726 if (ret < 0) { 1727 dwc3_trace(trace_dwc3_gadget, 1728 "failed to allocate OUT endpoints"); 1729 return ret; 1730 } 1731 1732 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1733 if (ret < 0) { 1734 dwc3_trace(trace_dwc3_gadget, 1735 "failed to allocate IN endpoints"); 1736 return ret; 1737 } 1738 1739 return 0; 1740 } 1741 1742 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1743 { 1744 struct dwc3_ep *dep; 1745 u8 epnum; 1746 1747 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1748 dep = dwc->eps[epnum]; 1749 if (!dep) 1750 continue; 1751 /* 1752 * Physical endpoints 0 and 1 are special; they form the 1753 * bi-directional USB endpoint 0. 1754 * 1755 * For those two physical endpoints, we don't allocate a TRB 1756 * pool nor do we add them the endpoints list. Due to that, we 1757 * shouldn't do these two operations otherwise we would end up 1758 * with all sorts of bugs when removing dwc3.ko. 1759 */ 1760 if (epnum != 0 && epnum != 1) { 1761 dwc3_free_trb_pool(dep); 1762 list_del(&dep->endpoint.ep_list); 1763 } 1764 1765 kfree(dep); 1766 } 1767 } 1768 1769 /* -------------------------------------------------------------------------- */ 1770 1771 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1772 struct dwc3_request *req, struct dwc3_trb *trb, 1773 const struct dwc3_event_depevt *event, int status) 1774 { 1775 unsigned int count; 1776 unsigned int s_pkt = 0; 1777 unsigned int trb_status; 1778 1779 trace_dwc3_complete_trb(dep, trb); 1780 1781 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1782 /* 1783 * We continue despite the error. There is not much we 1784 * can do. If we don't clean it up we loop forever. If 1785 * we skip the TRB then it gets overwritten after a 1786 * while since we use them in a ring buffer. A BUG() 1787 * would help. Lets hope that if this occurs, someone 1788 * fixes the root cause instead of looking away :) 1789 */ 1790 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1791 dep->name, trb); 1792 count = trb->size & DWC3_TRB_SIZE_MASK; 1793 1794 if (dep->direction) { 1795 if (count) { 1796 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1797 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1798 dev_dbg(dwc->dev, "incomplete IN transfer %s\n", 1799 dep->name); 1800 /* 1801 * If missed isoc occurred and there is 1802 * no request queued then issue END 1803 * TRANSFER, so that core generates 1804 * next xfernotready and we will issue 1805 * a fresh START TRANSFER. 
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach the
				 * next request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb *trb;
	unsigned int slot;
	unsigned int i;
	int ret;

	req = next_request(&dep->req_queued);
	if (!req) {
		WARN_ON_ONCE(1);
		return 1;
	}
	i = 0;
	do {
		slot = req->start_slot + i;
		if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
			slot++;
		slot %= DWC3_TRB_NUM;
		trb = &dep->trb_pool[slot];

		ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
				event, status);
		if (ret)
			break;
	} while (++i < req->request.num_mapped_sgs);

	dwc3_gadget_giveback(dep, req, status);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number, true);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	unsigned status = 0;
	int clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
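	 * dwc->u1u2 holds the U1/U2 enable bits saved by the first half of
	 * the workaround; they are OR'ed back into DCTL below once every
	 * enabled endpoint has an empty req_queued list.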
1916 */ 1917 if (dwc->revision < DWC3_REVISION_183A) { 1918 u32 reg; 1919 int i; 1920 1921 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1922 dep = dwc->eps[i]; 1923 1924 if (!(dep->flags & DWC3_EP_ENABLED)) 1925 continue; 1926 1927 if (!list_empty(&dep->req_queued)) 1928 return; 1929 } 1930 1931 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1932 reg |= dwc->u1u2; 1933 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1934 1935 dwc->u1u2 = 0; 1936 } 1937 } 1938 1939 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 1940 const struct dwc3_event_depevt *event) 1941 { 1942 struct dwc3_ep *dep; 1943 u8 epnum = event->endpoint_number; 1944 1945 dep = dwc->eps[epnum]; 1946 1947 if (!(dep->flags & DWC3_EP_ENABLED)) 1948 return; 1949 1950 if (epnum == 0 || epnum == 1) { 1951 dwc3_ep0_interrupt(dwc, event); 1952 return; 1953 } 1954 1955 switch (event->endpoint_event) { 1956 case DWC3_DEPEVT_XFERCOMPLETE: 1957 dep->resource_index = 0; 1958 1959 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1960 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", 1961 dep->name); 1962 return; 1963 } 1964 1965 dwc3_endpoint_transfer_complete(dwc, dep, event); 1966 break; 1967 case DWC3_DEPEVT_XFERINPROGRESS: 1968 dwc3_endpoint_transfer_complete(dwc, dep, event); 1969 break; 1970 case DWC3_DEPEVT_XFERNOTREADY: 1971 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1972 dwc3_gadget_start_isoc(dwc, dep, event); 1973 } else { 1974 int ret; 1975 1976 dwc3_trace(trace_dwc3_gadget, "%s: reason %s", 1977 dep->name, event->status & 1978 DEPEVT_STATUS_TRANSFER_ACTIVE 1979 ? "Transfer Active" 1980 : "Transfer Not Active"); 1981 1982 ret = __dwc3_gadget_kick_transfer(dep, 0, 1); 1983 if (!ret || ret == -EBUSY) 1984 return; 1985 1986 dev_dbg(dwc->dev, "%s: failed to kick transfers\n", 1987 dep->name); 1988 } 1989 1990 break; 1991 case DWC3_DEPEVT_STREAMEVT: 1992 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 1993 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 1994 dep->name); 1995 return; 1996 } 1997 1998 switch (event->status) { 1999 case DEPEVT_STREAMEVT_FOUND: 2000 dwc3_trace(trace_dwc3_gadget, 2001 "Stream %d found and started", 2002 event->parameters); 2003 2004 break; 2005 case DEPEVT_STREAMEVT_NOTFOUND: 2006 /* FALLTHROUGH */ 2007 default: 2008 dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); 2009 } 2010 break; 2011 case DWC3_DEPEVT_RXTXFIFOEVT: 2012 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); 2013 break; 2014 case DWC3_DEPEVT_EPCMDCMPLT: 2015 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); 2016 break; 2017 } 2018 } 2019 2020 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2021 { 2022 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2023 spin_unlock(&dwc->lock); 2024 dwc->gadget_driver->disconnect(&dwc->gadget); 2025 spin_lock(&dwc->lock); 2026 } 2027 } 2028 2029 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2030 { 2031 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2032 spin_unlock(&dwc->lock); 2033 dwc->gadget_driver->suspend(&dwc->gadget); 2034 spin_lock(&dwc->lock); 2035 } 2036 } 2037 2038 static void dwc3_resume_gadget(struct dwc3 *dwc) 2039 { 2040 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2041 spin_unlock(&dwc->lock); 2042 dwc->gadget_driver->resume(&dwc->gadget); 2043 spin_lock(&dwc->lock); 2044 } 2045 } 2046 2047 static void dwc3_reset_gadget(struct dwc3 *dwc) 2048 { 2049 if (!dwc->gadget_driver) 2050 return; 2051 2052 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2053 spin_unlock(&dwc->lock); 2054 usb_gadget_udc_reset(&dwc->gadget, 
dwc->gadget_driver); 2055 spin_lock(&dwc->lock); 2056 } 2057 } 2058 2059 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2060 { 2061 struct dwc3_ep *dep; 2062 struct dwc3_gadget_ep_cmd_params params; 2063 u32 cmd; 2064 int ret; 2065 2066 dep = dwc->eps[epnum]; 2067 2068 if (!dep->resource_index) 2069 return; 2070 2071 /* 2072 * NOTICE: We are violating what the Databook says about the 2073 * EndTransfer command. Ideally we would _always_ wait for the 2074 * EndTransfer Command Completion IRQ, but that's causing too 2075 * much trouble synchronizing between us and gadget driver. 2076 * 2077 * We have discussed this with the IP Provider and it was 2078 * suggested to giveback all requests here, but give HW some 2079 * extra time to synchronize with the interconnect. We're using 2080 * an arbitrary 100us delay for that. 2081 * 2082 * Note also that a similar handling was tested by Synopsys 2083 * (thanks a lot Paul) and nothing bad has come out of it. 2084 * In short, what we're doing is: 2085 * 2086 * - Issue EndTransfer WITH CMDIOC bit set 2087 * - Wait 100us 2088 */ 2089 2090 cmd = DWC3_DEPCMD_ENDTRANSFER; 2091 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 2092 cmd |= DWC3_DEPCMD_CMDIOC; 2093 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2094 memset(¶ms, 0, sizeof(params)); 2095 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 2096 WARN_ON_ONCE(ret); 2097 dep->resource_index = 0; 2098 dep->flags &= ~DWC3_EP_BUSY; 2099 udelay(100); 2100 } 2101 2102 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 2103 { 2104 u32 epnum; 2105 2106 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2107 struct dwc3_ep *dep; 2108 2109 dep = dwc->eps[epnum]; 2110 if (!dep) 2111 continue; 2112 2113 if (!(dep->flags & DWC3_EP_ENABLED)) 2114 continue; 2115 2116 dwc3_remove_requests(dwc, dep); 2117 } 2118 } 2119 2120 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2121 { 2122 u32 epnum; 2123 2124 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2125 struct dwc3_ep *dep; 2126 struct dwc3_gadget_ep_cmd_params params; 2127 int ret; 2128 2129 dep = dwc->eps[epnum]; 2130 if (!dep) 2131 continue; 2132 2133 if (!(dep->flags & DWC3_EP_STALL)) 2134 continue; 2135 2136 dep->flags &= ~DWC3_EP_STALL; 2137 2138 memset(¶ms, 0, sizeof(params)); 2139 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2140 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2141 WARN_ON_ONCE(ret); 2142 } 2143 } 2144 2145 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2146 { 2147 int reg; 2148 2149 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2150 reg &= ~DWC3_DCTL_INITU1ENA; 2151 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2152 2153 reg &= ~DWC3_DCTL_INITU2ENA; 2154 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2155 2156 dwc3_disconnect_gadget(dwc); 2157 dwc->start_config_issued = false; 2158 2159 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2160 dwc->setup_packet_pending = false; 2161 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2162 } 2163 2164 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2165 { 2166 u32 reg; 2167 2168 /* 2169 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2170 * would cause a missing Disconnect Event if there's a 2171 * pending Setup Packet in the FIFO. 2172 * 2173 * There's no suggested workaround on the official Bug 2174 * report, which states that "unless the driver/application 2175 * is doing any special handling of a disconnect event, 2176 * there is no functional issue". 

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
        int reg;

        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
        reg &= ~DWC3_DCTL_INITU1ENA;
        dwc3_writel(dwc->regs, DWC3_DCTL, reg);

        reg &= ~DWC3_DCTL_INITU2ENA;
        dwc3_writel(dwc->regs, DWC3_DCTL, reg);

        dwc3_disconnect_gadget(dwc);
        dwc->start_config_issued = false;

        dwc->gadget.speed = USB_SPEED_UNKNOWN;
        dwc->setup_packet_pending = false;
        usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
        u32 reg;

        /*
         * WORKAROUND: DWC3 revisions <1.88a have an issue which
         * would cause a missing Disconnect Event if there's a
         * pending Setup Packet in the FIFO.
         *
         * There's no suggested workaround on the official Bug
         * report, which states that "unless the driver/application
         * is doing any special handling of a disconnect event,
         * there is no functional issue".
         *
         * Unfortunately, it turns out that we _do_ some special
         * handling of a disconnect event, namely complete all
         * pending transfers, notify gadget driver of the
         * disconnection, and so on.
         *
         * Our suggested workaround is to follow the Disconnect
         * Event steps here, instead, based on a setup_packet_pending
         * flag. This flag is set whenever we see an XferNotReady
         * event on EP0 and is cleared on XferComplete for the
         * same endpoint.
         *
         * Refers to:
         *
         * STAR#9000466709: RTL: Device : Disconnect event not
         * generated if setup packet pending in FIFO
         */
        if (dwc->revision < DWC3_REVISION_188A) {
                if (dwc->setup_packet_pending)
                        dwc3_gadget_disconnect_interrupt(dwc);
        }

        dwc3_reset_gadget(dwc);

        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
        reg &= ~DWC3_DCTL_TSTCTRL_MASK;
        dwc3_writel(dwc->regs, DWC3_DCTL, reg);
        dwc->test_mode = false;

        dwc3_stop_active_transfers(dwc);
        dwc3_clear_stall_all_ep(dwc);
        dwc->start_config_issued = false;

        /* Reset device address to zero */
        reg = dwc3_readl(dwc->regs, DWC3_DCFG);
        reg &= ~(DWC3_DCFG_DEVADDR_MASK);
        dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
        u32 reg;
        u32 usb30_clock = DWC3_GCTL_CLK_BUS;

        /*
         * We only change the clock at SuperSpeed. It is not yet clear why
         * this is needed; it may become part of the power saving plan.
         */

        if (speed != DWC3_DSTS_SUPERSPEED)
                return;

        /*
         * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
         * each time on Connect Done.
         */
        if (!usb30_clock)
                return;

        reg = dwc3_readl(dwc->regs, DWC3_GCTL);
        reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
        dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
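
/*
 * Connect Done: latch the negotiated speed from DSTS, update the RAM clock
 * selection, size ep0 for that speed, configure USB2 LPM where applicable
 * and re-enable both directions of the default control endpoint.
 */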

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
        struct dwc3_ep *dep;
        int ret;
        u32 reg;
        u8 speed;

        reg = dwc3_readl(dwc->regs, DWC3_DSTS);
        speed = reg & DWC3_DSTS_CONNECTSPD;
        dwc->speed = speed;

        dwc3_update_ram_clk_sel(dwc, speed);

        switch (speed) {
        case DWC3_DCFG_SUPERSPEED:
                /*
                 * WORKAROUND: DWC3 revisions <1.90a have an issue which
                 * would cause a missing USB3 Reset event.
                 *
                 * In such situations, we should force a USB3 Reset
                 * event by calling our dwc3_gadget_reset_interrupt()
                 * routine.
                 *
                 * Refers to:
                 *
                 * STAR#9000483510: RTL: SS : USB3 reset event may
                 * not be generated always when the link enters poll
                 */
                if (dwc->revision < DWC3_REVISION_190A)
                        dwc3_gadget_reset_interrupt(dwc);

                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
                dwc->gadget.ep0->maxpacket = 512;
                dwc->gadget.speed = USB_SPEED_SUPER;
                break;
        case DWC3_DCFG_HIGHSPEED:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
                dwc->gadget.ep0->maxpacket = 64;
                dwc->gadget.speed = USB_SPEED_HIGH;
                break;
        case DWC3_DCFG_FULLSPEED2:
        case DWC3_DCFG_FULLSPEED1:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
                dwc->gadget.ep0->maxpacket = 64;
                dwc->gadget.speed = USB_SPEED_FULL;
                break;
        case DWC3_DCFG_LOWSPEED:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
                dwc->gadget.ep0->maxpacket = 8;
                dwc->gadget.speed = USB_SPEED_LOW;
                break;
        }

        /* Enable USB2 LPM Capability */

        if ((dwc->revision > DWC3_REVISION_194A)
                        && (speed != DWC3_DCFG_SUPERSPEED)) {
                reg = dwc3_readl(dwc->regs, DWC3_DCFG);
                reg |= DWC3_DCFG_LPM_CAP;
                dwc3_writel(dwc->regs, DWC3_DCFG, reg);

                reg = dwc3_readl(dwc->regs, DWC3_DCTL);
                reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

                reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

                /*
                 * On dwc3 revisions >= 2.40a, when the LPM Erratum is
                 * enabled and DCFG.LPMCap is set, the core responds with
                 * an ACK as long as the BESL value in the LPM token is
                 * less than or equal to the LPM NYET threshold.
                 */
                WARN_ONCE(dwc->revision < DWC3_REVISION_240A
                                && dwc->has_lpm_erratum,
                                "LPM Erratum not available on dwc3 revisions < 2.40a\n");

                if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
                        reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

                dwc3_writel(dwc->regs, DWC3_DCTL, reg);
        } else {
                reg = dwc3_readl(dwc->regs, DWC3_DCTL);
                reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
                dwc3_writel(dwc->regs, DWC3_DCTL, reg);
        }

        dep = dwc->eps[0];
        ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
                        false);
        if (ret) {
                dev_err(dwc->dev, "failed to enable %s\n", dep->name);
                return;
        }

        dep = dwc->eps[1];
        ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
                        false);
        if (ret) {
                dev_err(dwc->dev, "failed to enable %s\n", dep->name);
                return;
        }

        /*
         * Configure PHY via GUSB3PIPECTLn if required.
         *
         * Update GTXFIFOSIZn
         *
         * In both cases reset values should be sufficient.
         */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
        /*
         * TODO take core out of low power mode when that's
         * implemented.
         */

        dwc->gadget_driver->resume(&dwc->gadget);
}
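
/*
 * Link state changes: apply the pre-2.50a and pre-1.83a workarounds below,
 * then map U1/U2/U3 entry and Resume onto the gadget suspend/resume
 * callbacks.
 */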

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
                unsigned int evtinfo)
{
        enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
        unsigned int pwropt;

        /*
         * WORKAROUND: DWC3 revisions < 2.50a have an issue when configured
         * without Hibernation mode enabled, which shows up when the device
         * detects a host-initiated U3 exit.
         *
         * In that case, the device will generate a Link State Change
         * Interrupt from U3 to RESUME which is only necessary if Hibernation
         * is configured in.
         *
         * There are no functional changes due to such spurious event and we
         * just need to ignore it.
         *
         * Refers to:
         *
         * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
         * operational mode
         */
        pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
        if ((dwc->revision < DWC3_REVISION_250A) &&
                        (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
                if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
                                (next == DWC3_LINK_STATE_RESUME)) {
                        dwc3_trace(trace_dwc3_gadget,
                                        "ignoring transition U3 -> Resume");
                        return;
                }
        }

        /*
         * WORKAROUND: DWC3 revisions <1.83a have an issue in which, depending
         * on the link partner, the USB session might do multiple entries and
         * exits of low power states before a transfer takes place.
         *
         * Due to this problem, we might experience lower throughput. The
         * suggested workaround is to disable DCTL[12:9] bits if we're
         * transitioning from U1/U2 to U0 and enable those bits again
         * after a transfer completes and there are no pending transfers
         * on any of the enabled endpoints.
         *
         * This is the first half of that workaround; the second half, which
         * restores the saved U1/U2 enable bits once all queued requests have
         * completed, lives earlier in this file.
         *
         * Refers to:
         *
         * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
         * core send LGO_Ux entering U0
         */
        if (dwc->revision < DWC3_REVISION_183A) {
                if (next == DWC3_LINK_STATE_U0) {
                        u32 u1u2;
                        u32 reg;

                        switch (dwc->link_state) {
                        case DWC3_LINK_STATE_U1:
                        case DWC3_LINK_STATE_U2:
                                reg = dwc3_readl(dwc->regs, DWC3_DCTL);
                                u1u2 = reg & (DWC3_DCTL_INITU2ENA
                                                | DWC3_DCTL_ACCEPTU2ENA
                                                | DWC3_DCTL_INITU1ENA
                                                | DWC3_DCTL_ACCEPTU1ENA);

                                if (!dwc->u1u2)
                                        dwc->u1u2 = reg & u1u2;

                                reg &= ~u1u2;

                                dwc3_writel(dwc->regs, DWC3_DCTL, reg);
                                break;
                        default:
                                /* do nothing */
                                break;
                        }
                }
        }

        switch (next) {
        case DWC3_LINK_STATE_U1:
                if (dwc->speed == USB_SPEED_SUPER)
                        dwc3_suspend_gadget(dwc);
                break;
        case DWC3_LINK_STATE_U2:
        case DWC3_LINK_STATE_U3:
                dwc3_suspend_gadget(dwc);
                break;
        case DWC3_LINK_STATE_RESUME:
                dwc3_resume_gadget(dwc);
                break;
        default:
                /* do nothing */
                break;
        }

        dwc->link_state = next;
}
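
/*
 * Hibernation request: this currently only filters out the bogus events
 * described below; the actual hibernation entry is left as a placeholder.
 */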

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
                unsigned int evtinfo)
{
        unsigned int is_ss = evtinfo & BIT(4);

        /*
         * WORKAROUND: DWC3 revision 2.20a with hibernation support
         * has a known issue which can cause USB CV TD.9.23 to fail
         * randomly.
         *
         * Because of this issue, core could generate bogus hibernation
         * events which SW needs to ignore.
         *
         * Refers to:
         *
         * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
         * Device Fallback from SuperSpeed
         */
        if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
                return;

        /* enter hibernation here */
}

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
                const struct dwc3_event_devt *event)
{
        switch (event->type) {
        case DWC3_DEVICE_EVENT_DISCONNECT:
                dwc3_gadget_disconnect_interrupt(dwc);
                break;
        case DWC3_DEVICE_EVENT_RESET:
                dwc3_gadget_reset_interrupt(dwc);
                break;
        case DWC3_DEVICE_EVENT_CONNECT_DONE:
                dwc3_gadget_conndone_interrupt(dwc);
                break;
        case DWC3_DEVICE_EVENT_WAKEUP:
                dwc3_gadget_wakeup_interrupt(dwc);
                break;
        case DWC3_DEVICE_EVENT_HIBER_REQ:
                if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
                                        "unexpected hibernation event\n"))
                        break;

                dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
                break;
        case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
                dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
                break;
        case DWC3_DEVICE_EVENT_EOPF:
                dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
                break;
        case DWC3_DEVICE_EVENT_SOF:
                dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
                break;
        case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
                dwc3_trace(trace_dwc3_gadget, "Erratic Error");
                break;
        case DWC3_DEVICE_EVENT_CMD_CMPL:
                dwc3_trace(trace_dwc3_gadget, "Command Complete");
                break;
        case DWC3_DEVICE_EVENT_OVERFLOW:
                dwc3_trace(trace_dwc3_gadget, "Overflow");
                break;
        default:
                dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
        }
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
                const union dwc3_event *event)
{
        trace_dwc3_event(event->raw);

        /* Endpoint IRQ, handle it and return early */
        if (event->type.is_devspec == 0) {
                /* depevt */
                return dwc3_endpoint_interrupt(dwc, &event->depevt);
        }

        switch (event->type.type) {
        case DWC3_EVENT_TYPE_DEV:
                dwc3_gadget_interrupt(dwc, &event->devt);
                break;
        /* REVISIT what to do with Carkit and I2C events ? */
        default:
                dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
        }
}

static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
        struct dwc3_event_buffer *evt;
        irqreturn_t ret = IRQ_NONE;
        int left;
        u32 reg;

        evt = dwc->ev_buffs[buf];
        left = evt->count;

        if (!(evt->flags & DWC3_EVENT_PENDING))
                return IRQ_NONE;

        while (left > 0) {
                union dwc3_event event;

                event.raw = *(u32 *) (evt->buf + evt->lpos);

                dwc3_process_event_entry(dwc, &event);

                /*
                 * FIXME we wrap around correctly to the next entry as
                 * almost all entries are 4 bytes in size. There is one
                 * entry which is 12 bytes: a regular entry followed by
                 * 8 bytes of data. At the moment it is not clear how
                 * things are laid out when such an entry crosses the
                 * buffer boundary, so that case will have to be looked
                 * at once we try to handle it.
2581 */ 2582 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2583 left -= 4; 2584 2585 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2586 } 2587 2588 evt->count = 0; 2589 evt->flags &= ~DWC3_EVENT_PENDING; 2590 ret = IRQ_HANDLED; 2591 2592 /* Unmask interrupt */ 2593 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2594 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2595 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2596 2597 return ret; 2598 } 2599 2600 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) 2601 { 2602 struct dwc3 *dwc = _dwc; 2603 unsigned long flags; 2604 irqreturn_t ret = IRQ_NONE; 2605 int i; 2606 2607 spin_lock_irqsave(&dwc->lock, flags); 2608 2609 for (i = 0; i < dwc->num_event_buffers; i++) 2610 ret |= dwc3_process_event_buf(dwc, i); 2611 2612 spin_unlock_irqrestore(&dwc->lock, flags); 2613 2614 return ret; 2615 } 2616 2617 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf) 2618 { 2619 struct dwc3_event_buffer *evt; 2620 u32 count; 2621 u32 reg; 2622 2623 evt = dwc->ev_buffs[buf]; 2624 2625 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2626 count &= DWC3_GEVNTCOUNT_MASK; 2627 if (!count) 2628 return IRQ_NONE; 2629 2630 evt->count = count; 2631 evt->flags |= DWC3_EVENT_PENDING; 2632 2633 /* Mask interrupt */ 2634 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2635 reg |= DWC3_GEVNTSIZ_INTMASK; 2636 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2637 2638 return IRQ_WAKE_THREAD; 2639 } 2640 2641 static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2642 { 2643 struct dwc3 *dwc = _dwc; 2644 int i; 2645 irqreturn_t ret = IRQ_NONE; 2646 2647 spin_lock(&dwc->lock); 2648 2649 for (i = 0; i < dwc->num_event_buffers; i++) { 2650 irqreturn_t status; 2651 2652 status = dwc3_check_event_buf(dwc, i); 2653 if (status == IRQ_WAKE_THREAD) 2654 ret = status; 2655 } 2656 2657 spin_unlock(&dwc->lock); 2658 2659 return ret; 2660 } 2661 2662 /** 2663 * dwc3_gadget_init - Initializes gadget related registers 2664 * @dwc: pointer to our controller context structure 2665 * 2666 * Returns 0 on success otherwise negative errno. 2667 */ 2668 int dwc3_gadget_init(struct dwc3 *dwc) 2669 { 2670 int ret; 2671 2672 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2673 &dwc->ctrl_req_addr, GFP_KERNEL); 2674 if (!dwc->ctrl_req) { 2675 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2676 ret = -ENOMEM; 2677 goto err0; 2678 } 2679 2680 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2681 &dwc->ep0_trb_addr, GFP_KERNEL); 2682 if (!dwc->ep0_trb) { 2683 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2684 ret = -ENOMEM; 2685 goto err1; 2686 } 2687 2688 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2689 if (!dwc->setup_buf) { 2690 ret = -ENOMEM; 2691 goto err2; 2692 } 2693 2694 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2695 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2696 GFP_KERNEL); 2697 if (!dwc->ep0_bounce) { 2698 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2699 ret = -ENOMEM; 2700 goto err3; 2701 } 2702 2703 dwc->gadget.ops = &dwc3_gadget_ops; 2704 dwc->gadget.max_speed = USB_SPEED_SUPER; 2705 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2706 dwc->gadget.sg_supported = true; 2707 dwc->gadget.name = "dwc3-gadget"; 2708 2709 /* 2710 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2711 * on ep out. 
2712 */ 2713 dwc->gadget.quirk_ep_out_aligned_size = true; 2714 2715 /* 2716 * REVISIT: Here we should clear all pending IRQs to be 2717 * sure we're starting from a well known location. 2718 */ 2719 2720 ret = dwc3_gadget_init_endpoints(dwc); 2721 if (ret) 2722 goto err4; 2723 2724 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2725 if (ret) { 2726 dev_err(dwc->dev, "failed to register udc\n"); 2727 goto err4; 2728 } 2729 2730 return 0; 2731 2732 err4: 2733 dwc3_gadget_free_endpoints(dwc); 2734 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2735 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2736 2737 err3: 2738 kfree(dwc->setup_buf); 2739 2740 err2: 2741 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2742 dwc->ep0_trb, dwc->ep0_trb_addr); 2743 2744 err1: 2745 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2746 dwc->ctrl_req, dwc->ctrl_req_addr); 2747 2748 err0: 2749 return ret; 2750 } 2751 2752 /* -------------------------------------------------------------------------- */ 2753 2754 void dwc3_gadget_exit(struct dwc3 *dwc) 2755 { 2756 usb_del_gadget_udc(&dwc->gadget); 2757 2758 dwc3_gadget_free_endpoints(dwc); 2759 2760 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2761 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2762 2763 kfree(dwc->setup_buf); 2764 2765 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2766 dwc->ep0_trb, dwc->ep0_trb_addr); 2767 2768 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2769 dwc->ctrl_req, dwc->ctrl_req_addr); 2770 } 2771 2772 int dwc3_gadget_suspend(struct dwc3 *dwc) 2773 { 2774 if (dwc->pullups_connected) { 2775 dwc3_gadget_disable_irq(dwc); 2776 dwc3_gadget_run_stop(dwc, true, true); 2777 } 2778 2779 __dwc3_gadget_ep_disable(dwc->eps[0]); 2780 __dwc3_gadget_ep_disable(dwc->eps[1]); 2781 2782 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG); 2783 2784 return 0; 2785 } 2786 2787 int dwc3_gadget_resume(struct dwc3 *dwc) 2788 { 2789 struct dwc3_ep *dep; 2790 int ret; 2791 2792 /* Start with SuperSpeed Default */ 2793 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2794 2795 dep = dwc->eps[0]; 2796 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2797 false); 2798 if (ret) 2799 goto err0; 2800 2801 dep = dwc->eps[1]; 2802 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2803 false); 2804 if (ret) 2805 goto err1; 2806 2807 /* begin to receive SETUP packets */ 2808 dwc->ep0state = EP0_SETUP_PHASE; 2809 dwc3_ep0_out_start(dwc); 2810 2811 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg); 2812 2813 if (dwc->pullups_connected) { 2814 dwc3_gadget_enable_irq(dwc); 2815 dwc3_gadget_run_stop(dwc, true, false); 2816 } 2817 2818 return 0; 2819 2820 err1: 2821 __dwc3_gadget_ep_disable(dwc->eps[0]); 2822 2823 err0: 2824 return ret; 2825 } 2826
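
/*
 * For reference: the hard IRQ handler dwc3_interrupt() above only latches
 * the pending event count and masks the event buffer interrupt, while
 * dwc3_thread_interrupt() drains the buffers with the controller lock held.
 * Registration of the pair is done elsewhere in the driver; a minimal
 * sketch of how such a top-half/bottom-half pair is typically wired up
 * with request_threaded_irq() (the platform-device lookup and the error
 * message below are illustrative only) would be:
 *
 *	int irq = platform_get_irq(to_platform_device(dwc->dev), 0);
 *	int ret;
 *
 *	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
 *			IRQF_SHARED, "dwc3", dwc);
 *	if (ret)
 *		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
 *				irq, ret);
 */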