1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
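 *
 * As a rough worked example (numbers taken from the loop below, not
 * from the databook): DSTS.DCNRD is polled with a 5us delay and up to
 * 10000 retries, i.e. roughly 50ms worst case, before the link state
 * change request is abandoned with -ETIMEDOUT.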
103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 dwc3_trace(trace_dwc3_gadget, 143 "link state change request timed out"); 144 145 return -ETIMEDOUT; 146 } 147 148 /** 149 * dwc3_ep_inc_trb() - Increment a TRB index. 150 * @index: Pointer to the TRB index to increment. 151 * 152 * The index should never point to the link TRB. After incrementing, 153 * if it points to the link TRB, wrap around to the beginning. The 154 * link TRB is always at the last TRB entry. 155 */ 156 static void dwc3_ep_inc_trb(u8 *index) 157 { 158 (*index)++; 159 if (*index == (DWC3_TRB_NUM - 1)) 160 *index = 0; 161 } 162 163 static void dwc3_ep_inc_enq(struct dwc3_ep *dep) 164 { 165 dwc3_ep_inc_trb(&dep->trb_enqueue); 166 } 167 168 static void dwc3_ep_inc_deq(struct dwc3_ep *dep) 169 { 170 dwc3_ep_inc_trb(&dep->trb_dequeue); 171 } 172 173 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 174 int status) 175 { 176 struct dwc3 *dwc = dep->dwc; 177 178 req->started = false; 179 list_del(&req->list); 180 req->trb = NULL; 181 182 if (req->request.status == -EINPROGRESS) 183 req->request.status = status; 184 185 if (dwc->ep0_bounced && dep->number == 0) 186 dwc->ep0_bounced = false; 187 else 188 usb_gadget_unmap_request(&dwc->gadget, &req->request, 189 req->direction); 190 191 trace_dwc3_gadget_giveback(req); 192 193 spin_unlock(&dwc->lock); 194 usb_gadget_giveback_request(&dep->endpoint, &req->request); 195 spin_lock(&dwc->lock); 196 197 if (dep->number > 1) 198 pm_runtime_put(dwc->dev); 199 } 200 201 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 202 { 203 u32 timeout = 500; 204 int status = 0; 205 int ret = 0; 206 u32 reg; 207 208 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 209 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 210 211 do { 212 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 213 if (!(reg & DWC3_DGCMD_CMDACT)) { 214 status = DWC3_DGCMD_STATUS(reg); 215 if (status) 216 ret = -EINVAL; 217 break; 218 } 219 } while (timeout--); 220 221 if (!timeout) { 222 ret = -ETIMEDOUT; 223 status = -ETIMEDOUT; 224 } 225 226 trace_dwc3_gadget_generic_cmd(cmd, param, status); 227 228 return ret; 229 } 230 231 static int __dwc3_gadget_wakeup(struct dwc3 *dwc); 232 233 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, 234 struct dwc3_gadget_ep_cmd_params *params) 235 { 236 struct dwc3 *dwc = dep->dwc; 237 u32 timeout = 500; 238 u32 reg; 239 240 int cmd_status = 0; 241 int susphy = false; 242 int ret = -EINVAL; 243 244 /* 245 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if 246 * we're issuing an endpoint command, we must check if 247 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
248 * 249 * We will also set SUSPHY bit to what it was before returning as stated 250 * by the same section on Synopsys databook. 251 */ 252 if (dwc->gadget.speed <= USB_SPEED_HIGH) { 253 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 254 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 255 susphy = true; 256 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 257 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 258 } 259 } 260 261 if (cmd == DWC3_DEPCMD_STARTTRANSFER) { 262 int needs_wakeup; 263 264 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || 265 dwc->link_state == DWC3_LINK_STATE_U2 || 266 dwc->link_state == DWC3_LINK_STATE_U3); 267 268 if (unlikely(needs_wakeup)) { 269 ret = __dwc3_gadget_wakeup(dwc); 270 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 271 ret); 272 } 273 } 274 275 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 276 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 277 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 278 279 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT); 280 do { 281 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 282 if (!(reg & DWC3_DEPCMD_CMDACT)) { 283 cmd_status = DWC3_DEPCMD_STATUS(reg); 284 285 switch (cmd_status) { 286 case 0: 287 ret = 0; 288 break; 289 case DEPEVT_TRANSFER_NO_RESOURCE: 290 ret = -EINVAL; 291 break; 292 case DEPEVT_TRANSFER_BUS_EXPIRY: 293 /* 294 * SW issues START TRANSFER command to 295 * isochronous ep with future frame interval. If 296 * future interval time has already passed when 297 * core receives the command, it will respond 298 * with an error status of 'Bus Expiry'. 299 * 300 * Instead of always returning -EINVAL, let's 301 * give a hint to the gadget driver that this is 302 * the case by returning -EAGAIN. 303 */ 304 ret = -EAGAIN; 305 break; 306 default: 307 dev_WARN(dwc->dev, "UNKNOWN cmd status\n"); 308 } 309 310 break; 311 } 312 } while (--timeout); 313 314 if (timeout == 0) { 315 ret = -ETIMEDOUT; 316 cmd_status = -ETIMEDOUT; 317 } 318 319 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); 320 321 if (unlikely(susphy)) { 322 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 323 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 324 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 325 } 326 327 return ret; 328 } 329 330 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) 331 { 332 struct dwc3 *dwc = dep->dwc; 333 struct dwc3_gadget_ep_cmd_params params; 334 u32 cmd = DWC3_DEPCMD_CLEARSTALL; 335 336 /* 337 * As of core revision 2.60a the recommended programming model 338 * is to set the ClearPendIN bit when issuing a Clear Stall EP 339 * command for IN endpoints. This is to prevent an issue where 340 * some (non-compliant) hosts may not send ACK TPs for pending 341 * IN transfers due to a mishandled error condition. Synopsys 342 * STAR 9000614252. 
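 *
 * For illustration only: on a >= 2.60a core with a SuperSpeed gadget
 * and an IN endpoint, the command word built below ends up as
 *
 *	cmd = DWC3_DEPCMD_CLEARSTALL | DWC3_DEPCMD_CLEARPENDIN;
 *
 * while any other combination issues a plain CLEARSTALL.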
343 */ 344 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) && 345 (dwc->gadget.speed >= USB_SPEED_SUPER)) 346 cmd |= DWC3_DEPCMD_CLEARPENDIN; 347 348 memset(&params, 0, sizeof(params)); 349 350 return dwc3_send_gadget_ep_cmd(dep, cmd, &params); 351 } 352 353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 354 struct dwc3_trb *trb) 355 { 356 u32 offset = (char *) trb - (char *) dep->trb_pool; 357 358 return dep->trb_pool_dma + offset; 359 } 360 361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 362 { 363 struct dwc3 *dwc = dep->dwc; 364 365 if (dep->trb_pool) 366 return 0; 367 368 dep->trb_pool = dma_alloc_coherent(dwc->dev, 369 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 370 &dep->trb_pool_dma, GFP_KERNEL); 371 if (!dep->trb_pool) { 372 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 373 dep->name); 374 return -ENOMEM; 375 } 376 377 return 0; 378 } 379 380 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 381 { 382 struct dwc3 *dwc = dep->dwc; 383 384 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 385 dep->trb_pool, dep->trb_pool_dma); 386 387 dep->trb_pool = NULL; 388 dep->trb_pool_dma = 0; 389 } 390 391 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); 392 393 /** 394 * dwc3_gadget_start_config - Configure EP resources 395 * @dwc: pointer to our controller context structure 396 * @dep: endpoint that is being enabled 397 * 398 * The assignment of transfer resources cannot perfectly follow the 399 * data book due to the fact that the controller driver does not have 400 * all knowledge of the configuration in advance. It is given this 401 * information piecemeal by the composite gadget framework after every 402 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook 403 * programming model in this scenario can cause errors. For two 404 * reasons: 405 * 406 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION 407 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of 408 * multiple interfaces. 409 * 410 * 2) The databook does not mention doing more DEPXFERCFG for new 411 * endpoint on alt setting (8.1.6). 412 * 413 * The following simplified method is used instead: 414 * 415 * All hardware endpoints can be assigned a transfer resource and this 416 * setting will stay persistent until either a core reset or 417 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and 418 * do DEPXFERCFG for every hardware endpoint as well. We are 419 * guaranteed that there are as many transfer resources as endpoints. 420 * 421 * This function is called for each endpoint when it is being enabled 422 * but is triggered only when called for EP0-out, which always happens 423 * first, and which should only happen in one of the above conditions.
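 *
 * A sketch of the resulting command sequence (derived from the code
 * below, shown only for illustration):
 *
 *	DEPSTARTCFG (transfer resource index 0), issued once via EP0-out;
 *	DEPXFERCFG with NumXferRes = 1 for every allocated endpoint.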
424 */ 425 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 426 { 427 struct dwc3_gadget_ep_cmd_params params; 428 u32 cmd; 429 int i; 430 int ret; 431 432 if (dep->number) 433 return 0; 434 435 memset(&params, 0x00, sizeof(params)); 436 cmd = DWC3_DEPCMD_DEPSTARTCFG; 437 438 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); 439 if (ret) 440 return ret; 441 442 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 443 struct dwc3_ep *dep = dwc->eps[i]; 444 445 if (!dep) 446 continue; 447 448 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 449 if (ret) 450 return ret; 451 } 452 453 return 0; 454 } 455 456 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 457 const struct usb_endpoint_descriptor *desc, 458 const struct usb_ss_ep_comp_descriptor *comp_desc, 459 bool modify, bool restore) 460 { 461 struct dwc3_gadget_ep_cmd_params params; 462 463 if (dev_WARN_ONCE(dwc->dev, modify && restore, 464 "Can't modify and restore\n")) 465 return -EINVAL; 466 467 memset(&params, 0x00, sizeof(params)); 468 469 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 470 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 471 472 /* Burst size is only needed in SuperSpeed mode */ 473 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 474 u32 burst = dep->endpoint.maxburst; 475 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); 476 } 477 478 if (modify) { 479 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY; 480 } else if (restore) { 481 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 482 params.param2 |= dep->saved_state; 483 } else { 484 params.param0 |= DWC3_DEPCFG_ACTION_INIT; 485 } 486 487 if (usb_endpoint_xfer_control(desc)) 488 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; 489 490 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc)) 491 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN; 492 493 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 494 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 495 | DWC3_DEPCFG_STREAM_EVENT_EN; 496 dep->stream_capable = true; 497 } 498 499 if (!usb_endpoint_xfer_control(desc)) 500 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 501 502 /* 503 * We are doing 1:1 mapping for endpoints, meaning 504 * Physical Endpoint 2 maps to Logical Endpoint 2 and 505 * so on. We consider the direction bit as part of the physical 506 * endpoint number. So USB endpoint 0x81 is 0x03.
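 *
 * In other words (illustrative only): physical = (num << 1) | dir, so
 * USB endpoint 0x81 (IN 1) is physical endpoint 3 and USB endpoint
 * 0x02 (OUT 2) is physical endpoint 4, matching the epnum computation
 * in dwc3_gadget_init_hw_endpoints().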
507 */ 508 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 509 510 /* 511 * We must use the lower 16 TX FIFOs even though 512 * HW might have more 513 */ 514 if (dep->direction) 515 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 516 517 if (desc->bInterval) { 518 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 519 dep->interval = 1 << (desc->bInterval - 1); 520 } 521 522 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params); 523 } 524 525 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 526 { 527 struct dwc3_gadget_ep_cmd_params params; 528 529 memset(&params, 0x00, sizeof(params)); 530 531 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 532 533 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 534 &params); 535 } 536 537 /** 538 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 539 * @dep: endpoint to be initialized 540 * @desc: USB Endpoint Descriptor 541 * 542 * Caller should take care of locking 543 */ 544 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 545 const struct usb_endpoint_descriptor *desc, 546 const struct usb_ss_ep_comp_descriptor *comp_desc, 547 bool modify, bool restore) 548 { 549 struct dwc3 *dwc = dep->dwc; 550 u32 reg; 551 int ret; 552 553 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); 554 555 if (!(dep->flags & DWC3_EP_ENABLED)) { 556 ret = dwc3_gadget_start_config(dwc, dep); 557 if (ret) 558 return ret; 559 } 560 561 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify, 562 restore); 563 if (ret) 564 return ret; 565 566 if (!(dep->flags & DWC3_EP_ENABLED)) { 567 struct dwc3_trb *trb_st_hw; 568 struct dwc3_trb *trb_link; 569 570 dep->endpoint.desc = desc; 571 dep->comp_desc = comp_desc; 572 dep->type = usb_endpoint_type(desc); 573 dep->flags |= DWC3_EP_ENABLED; 574 575 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 576 reg |= DWC3_DALEPENA_EP(dep->number); 577 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 578 579 if (usb_endpoint_xfer_control(desc)) 580 return 0; 581 582 /* Initialize the TRB ring */ 583 dep->trb_dequeue = 0; 584 dep->trb_enqueue = 0; 585 memset(dep->trb_pool, 0, 586 sizeof(struct dwc3_trb) * DWC3_TRB_NUM); 587 588 /* Link TRB. The HWO bit is never reset */ 589 trb_st_hw = &dep->trb_pool[0]; 590 591 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 592 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 593 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 594 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 595 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 596 } 597 598 return 0; 599 } 600 601 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 602 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 603 { 604 struct dwc3_request *req; 605 606 dwc3_stop_active_transfer(dwc, dep->number, true); 607 608 /* - giveback all requests to gadget driver */ 609 while (!list_empty(&dep->started_list)) { 610 req = next_request(&dep->started_list); 611 612 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 613 } 614 615 while (!list_empty(&dep->pending_list)) { 616 req = next_request(&dep->pending_list); 617 618 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 619 } 620 } 621 622 /** 623 * __dwc3_gadget_ep_disable - Disables a HW endpoint 624 * @dep: the endpoint to disable 625 * 626 * This function also removes requests which are currently processed by the 627 * hardware and those which are not yet scheduled. 628 * Caller should take care of locking.
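 * Note that after this returns dep->endpoint.desc is NULL, so a later
 * usb_ep_queue() on this endpoint is rejected by
 * __dwc3_gadget_ep_queue() with -ESHUTDOWN.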
629 */ 630 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 631 { 632 struct dwc3 *dwc = dep->dwc; 633 u32 reg; 634 635 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name); 636 637 dwc3_remove_requests(dwc, dep); 638 639 /* make sure HW endpoint isn't stalled */ 640 if (dep->flags & DWC3_EP_STALL) 641 __dwc3_gadget_ep_set_halt(dep, 0, false); 642 643 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 644 reg &= ~DWC3_DALEPENA_EP(dep->number); 645 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 646 647 dep->stream_capable = false; 648 dep->endpoint.desc = NULL; 649 dep->comp_desc = NULL; 650 dep->type = 0; 651 dep->flags = 0; 652 653 return 0; 654 } 655 656 /* -------------------------------------------------------------------------- */ 657 658 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 659 const struct usb_endpoint_descriptor *desc) 660 { 661 return -EINVAL; 662 } 663 664 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 665 { 666 return -EINVAL; 667 } 668 669 /* -------------------------------------------------------------------------- */ 670 671 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 672 const struct usb_endpoint_descriptor *desc) 673 { 674 struct dwc3_ep *dep; 675 struct dwc3 *dwc; 676 unsigned long flags; 677 int ret; 678 679 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 680 pr_debug("dwc3: invalid parameters\n"); 681 return -EINVAL; 682 } 683 684 if (!desc->wMaxPacketSize) { 685 pr_debug("dwc3: missing wMaxPacketSize\n"); 686 return -EINVAL; 687 } 688 689 dep = to_dwc3_ep(ep); 690 dwc = dep->dwc; 691 692 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 693 "%s is already enabled\n", 694 dep->name)) 695 return 0; 696 697 spin_lock_irqsave(&dwc->lock, flags); 698 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 699 spin_unlock_irqrestore(&dwc->lock, flags); 700 701 return ret; 702 } 703 704 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 705 { 706 struct dwc3_ep *dep; 707 struct dwc3 *dwc; 708 unsigned long flags; 709 int ret; 710 711 if (!ep) { 712 pr_debug("dwc3: invalid parameters\n"); 713 return -EINVAL; 714 } 715 716 dep = to_dwc3_ep(ep); 717 dwc = dep->dwc; 718 719 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 720 "%s is already disabled\n", 721 dep->name)) 722 return 0; 723 724 spin_lock_irqsave(&dwc->lock, flags); 725 ret = __dwc3_gadget_ep_disable(dep); 726 spin_unlock_irqrestore(&dwc->lock, flags); 727 728 return ret; 729 } 730 731 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 732 gfp_t gfp_flags) 733 { 734 struct dwc3_request *req; 735 struct dwc3_ep *dep = to_dwc3_ep(ep); 736 737 req = kzalloc(sizeof(*req), gfp_flags); 738 if (!req) 739 return NULL; 740 741 req->epnum = dep->number; 742 req->dep = dep; 743 744 dep->allocated_requests++; 745 746 trace_dwc3_alloc_request(req); 747 748 return &req->request; 749 } 750 751 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 752 struct usb_request *request) 753 { 754 struct dwc3_request *req = to_dwc3_request(request); 755 struct dwc3_ep *dep = to_dwc3_ep(ep); 756 757 dep->allocated_requests--; 758 trace_dwc3_free_request(req); 759 kfree(req); 760 } 761 762 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep); 763 764 /** 765 * dwc3_prepare_one_trb - setup one TRB from one request 766 * @dep: endpoint for which this request is prepared 767 * @req: dwc3_request pointer 768 */ 769 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 770 struct dwc3_request *req, dma_addr_t dma, 771 unsigned length, 
unsigned chain, unsigned node) 772 { 773 struct dwc3_trb *trb; 774 775 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s", 776 dep->name, req, (unsigned long long) dma, 777 length, chain ? " chain" : ""); 778 779 trb = &dep->trb_pool[dep->trb_enqueue]; 780 781 if (!req->trb) { 782 dwc3_gadget_move_started_request(req); 783 req->trb = trb; 784 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 785 req->first_trb_index = dep->trb_enqueue; 786 } 787 788 dwc3_ep_inc_enq(dep); 789 790 trb->size = DWC3_TRB_SIZE_LENGTH(length); 791 trb->bpl = lower_32_bits(dma); 792 trb->bph = upper_32_bits(dma); 793 794 switch (usb_endpoint_type(dep->endpoint.desc)) { 795 case USB_ENDPOINT_XFER_CONTROL: 796 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 797 break; 798 799 case USB_ENDPOINT_XFER_ISOC: 800 if (!node) 801 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 802 else 803 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 804 805 /* always enable Interrupt on Missed ISOC */ 806 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 807 break; 808 809 case USB_ENDPOINT_XFER_BULK: 810 case USB_ENDPOINT_XFER_INT: 811 trb->ctrl = DWC3_TRBCTL_NORMAL; 812 break; 813 default: 814 /* 815 * This is only possible with faulty memory because we 816 * checked it already :) 817 */ 818 BUG(); 819 } 820 821 /* always enable Continue on Short Packet */ 822 trb->ctrl |= DWC3_TRB_CTRL_CSP; 823 824 if ((!req->request.no_interrupt && !chain) || 825 (dwc3_calc_trbs_left(dep) == 0)) 826 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; 827 828 if (chain) 829 trb->ctrl |= DWC3_TRB_CTRL_CHN; 830 831 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 832 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 833 834 trb->ctrl |= DWC3_TRB_CTRL_HWO; 835 836 dep->queued_requests++; 837 838 trace_dwc3_prepare_trb(dep, trb); 839 } 840 841 /** 842 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring 843 * @dep: The endpoint with the TRB ring 844 * @index: The index of the current TRB in the ring 845 * 846 * Returns the TRB prior to the one pointed to by the index. If the 847 * index is 0, we will wrap backwards, skip the link TRB, and return 848 * the one just before that. 849 */ 850 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) 851 { 852 u8 tmp = index; 853 854 if (!tmp) 855 tmp = DWC3_TRB_NUM - 1; 856 857 return &dep->trb_pool[tmp - 1]; 858 } 859 860 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) 861 { 862 struct dwc3_trb *tmp; 863 u8 trbs_left; 864 865 /* 866 * If enqueue & dequeue are equal, then it is either full or empty. 867 * 868 * One way to know for sure is if the TRB right before us has HWO bit 869 * set or not. If it has, then we're definitely full and can't fit any 870 * more transfers in our ring.
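 *
 * A worked example with a hypothetical DWC3_TRB_NUM of 8 (purely for
 * illustration; the driver's real ring size applies):
 *
 *	enqueue == dequeue == 2, previous TRB has HWO set   -> 0 left
 *	enqueue == dequeue == 2, previous TRB has HWO clear -> 7 left
 *	enqueue == 2, dequeue == 5 -> (5 - 2) & 7 = 3 left
 *	enqueue == 5, dequeue == 2 -> ((2 - 5) & 7) - 1 = 4 left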
871 */ 872 if (dep->trb_enqueue == dep->trb_dequeue) { 873 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 874 if (tmp->ctrl & DWC3_TRB_CTRL_HWO) 875 return 0; 876 877 return DWC3_TRB_NUM - 1; 878 } 879 880 trbs_left = dep->trb_dequeue - dep->trb_enqueue; 881 trbs_left &= (DWC3_TRB_NUM - 1); 882 883 if (dep->trb_dequeue < dep->trb_enqueue) 884 trbs_left--; 885 886 return trbs_left; 887 } 888 889 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, 890 struct dwc3_request *req) 891 { 892 struct scatterlist *sg = req->sg; 893 struct scatterlist *s; 894 unsigned int length; 895 dma_addr_t dma; 896 int i; 897 898 for_each_sg(sg, s, req->num_pending_sgs, i) { 899 unsigned chain = true; 900 901 length = sg_dma_len(s); 902 dma = sg_dma_address(s); 903 904 if (sg_is_last(s)) 905 chain = false; 906 907 dwc3_prepare_one_trb(dep, req, dma, length, 908 chain, i); 909 910 if (!dwc3_calc_trbs_left(dep)) 911 break; 912 } 913 } 914 915 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, 916 struct dwc3_request *req) 917 { 918 unsigned int length; 919 dma_addr_t dma; 920 921 dma = req->request.dma; 922 length = req->request.length; 923 924 dwc3_prepare_one_trb(dep, req, dma, length, 925 false, 0); 926 } 927 928 /* 929 * dwc3_prepare_trbs - setup TRBs from requests 930 * @dep: endpoint for which requests are being prepared 931 * 932 * The function goes through the requests list and sets up TRBs for the 933 * transfers. The function returns once there are no more TRBs available or 934 * it runs out of requests. 935 */ 936 static void dwc3_prepare_trbs(struct dwc3_ep *dep) 937 { 938 struct dwc3_request *req, *n; 939 940 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 941 942 if (!dwc3_calc_trbs_left(dep)) 943 return; 944 945 list_for_each_entry_safe(req, n, &dep->pending_list, list) { 946 if (req->num_pending_sgs > 0) 947 dwc3_prepare_one_trb_sg(dep, req); 948 else 949 dwc3_prepare_one_trb_linear(dep, req); 950 951 if (!dwc3_calc_trbs_left(dep)) 952 return; 953 } 954 } 955 956 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) 957 { 958 struct dwc3_gadget_ep_cmd_params params; 959 struct dwc3_request *req; 960 struct dwc3 *dwc = dep->dwc; 961 int starting; 962 int ret; 963 u32 cmd; 964 965 starting = !(dep->flags & DWC3_EP_BUSY); 966 967 dwc3_prepare_trbs(dep); 968 req = next_request(&dep->started_list); 969 if (!req) { 970 dep->flags |= DWC3_EP_PENDING_REQUEST; 971 return 0; 972 } 973 974 memset(&params, 0, sizeof(params)); 975 976 if (starting) { 977 params.param0 = upper_32_bits(req->trb_dma); 978 params.param1 = lower_32_bits(req->trb_dma); 979 cmd = DWC3_DEPCMD_STARTTRANSFER | 980 DWC3_DEPCMD_PARAM(cmd_param); 981 } else { 982 cmd = DWC3_DEPCMD_UPDATETRANSFER | 983 DWC3_DEPCMD_PARAM(dep->resource_index); 984 } 985 986 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); 987 if (ret < 0) { 988 /* 989 * FIXME we need to iterate over the list of requests 990 * here and stop, unmap, free and del each of the linked 991 * requests instead of what we do now.
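 *
 * Roughly what that would look like (a sketch only, not what the code
 * below currently does):
 *
 *	list_for_each_entry_safe(req, n, &dep->started_list, list) {
 *		usb_gadget_unmap_request(&dwc->gadget, &req->request,
 *				req->direction);
 *		list_del(&req->list);
 *	}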
992 */ 993 usb_gadget_unmap_request(&dwc->gadget, &req->request, 994 req->direction); 995 list_del(&req->list); 996 return ret; 997 } 998 999 dep->flags |= DWC3_EP_BUSY; 1000 1001 if (starting) { 1002 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 1003 WARN_ON_ONCE(!dep->resource_index); 1004 } 1005 1006 return 0; 1007 } 1008 1009 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1010 struct dwc3_ep *dep, u32 cur_uf) 1011 { 1012 u32 uf; 1013 1014 if (list_empty(&dep->pending_list)) { 1015 dwc3_trace(trace_dwc3_gadget, 1016 "ISOC ep %s run out for requests", 1017 dep->name); 1018 dep->flags |= DWC3_EP_PENDING_REQUEST; 1019 return; 1020 } 1021 1022 /* 4 micro frames in the future */ 1023 uf = cur_uf + dep->interval * 4; 1024 1025 __dwc3_gadget_kick_transfer(dep, uf); 1026 } 1027 1028 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1029 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1030 { 1031 u32 cur_uf, mask; 1032 1033 mask = ~(dep->interval - 1); 1034 cur_uf = event->parameters & mask; 1035 1036 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1037 } 1038 1039 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1040 { 1041 struct dwc3 *dwc = dep->dwc; 1042 int ret; 1043 1044 if (!dep->endpoint.desc) { 1045 dwc3_trace(trace_dwc3_gadget, 1046 "trying to queue request %p to disabled %s", 1047 &req->request, dep->endpoint.name); 1048 return -ESHUTDOWN; 1049 } 1050 1051 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1052 &req->request, req->dep->name)) { 1053 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'", 1054 &req->request, req->dep->name); 1055 return -EINVAL; 1056 } 1057 1058 pm_runtime_get(dwc->dev); 1059 1060 req->request.actual = 0; 1061 req->request.status = -EINPROGRESS; 1062 req->direction = dep->direction; 1063 req->epnum = dep->number; 1064 1065 trace_dwc3_ep_queue(req); 1066 1067 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1068 dep->direction); 1069 if (ret) 1070 return ret; 1071 1072 req->sg = req->request.sg; 1073 req->num_pending_sgs = req->request.num_mapped_sgs; 1074 1075 list_add_tail(&req->list, &dep->pending_list); 1076 1077 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1078 dep->flags & DWC3_EP_PENDING_REQUEST) { 1079 if (list_empty(&dep->started_list)) { 1080 dwc3_stop_active_transfer(dwc, dep->number, true); 1081 dep->flags = DWC3_EP_ENABLED; 1082 } 1083 return 0; 1084 } 1085 1086 if (!dwc3_calc_trbs_left(dep)) 1087 return 0; 1088 1089 ret = __dwc3_gadget_kick_transfer(dep, 0); 1090 if (ret && ret != -EBUSY) 1091 dwc3_trace(trace_dwc3_gadget, 1092 "%s: failed to kick transfers", 1093 dep->name); 1094 if (ret == -EBUSY) 1095 ret = 0; 1096 1097 return ret; 1098 } 1099 1100 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep, 1101 struct usb_request *request) 1102 { 1103 dwc3_gadget_ep_free_request(ep, request); 1104 } 1105 1106 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) 1107 { 1108 struct dwc3_request *req; 1109 struct usb_request *request; 1110 struct usb_ep *ep = &dep->endpoint; 1111 1112 dwc3_trace(trace_dwc3_gadget, "queueing ZLP"); 1113 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 1114 if (!request) 1115 return -ENOMEM; 1116 1117 request->length = 0; 1118 request->buf = dwc->zlp_buf; 1119 request->complete = __dwc3_gadget_ep_zlp_complete; 1120 1121 req = to_dwc3_request(request); 1122 1123 return __dwc3_gadget_ep_queue(dep, req); 1124 } 1125 1126 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct 
usb_request *request, 1127 gfp_t gfp_flags) 1128 { 1129 struct dwc3_request *req = to_dwc3_request(request); 1130 struct dwc3_ep *dep = to_dwc3_ep(ep); 1131 struct dwc3 *dwc = dep->dwc; 1132 1133 unsigned long flags; 1134 1135 int ret; 1136 1137 spin_lock_irqsave(&dwc->lock, flags); 1138 ret = __dwc3_gadget_ep_queue(dep, req); 1139 1140 /* 1141 * Okay, here's the thing: if the gadget driver has requested a ZLP by 1142 * setting request->zero, instead of doing magic, we will just queue an 1143 * extra usb_request ourselves so that it gets handled the same way as 1144 * any other request. 1145 */ 1146 if (ret == 0 && request->zero && request->length && 1147 (request->length % ep->maxpacket == 0)) 1148 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep); 1149 1150 spin_unlock_irqrestore(&dwc->lock, flags); 1151 1152 return ret; 1153 } 1154 1155 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1156 struct usb_request *request) 1157 { 1158 struct dwc3_request *req = to_dwc3_request(request); 1159 struct dwc3_request *r = NULL; 1160 1161 struct dwc3_ep *dep = to_dwc3_ep(ep); 1162 struct dwc3 *dwc = dep->dwc; 1163 1164 unsigned long flags; 1165 int ret = 0; 1166 1167 trace_dwc3_ep_dequeue(req); 1168 1169 spin_lock_irqsave(&dwc->lock, flags); 1170 1171 list_for_each_entry(r, &dep->pending_list, list) { 1172 if (r == req) 1173 break; 1174 } 1175 1176 if (r != req) { 1177 list_for_each_entry(r, &dep->started_list, list) { 1178 if (r == req) 1179 break; 1180 } 1181 if (r == req) { 1182 /* wait until it is processed */ 1183 dwc3_stop_active_transfer(dwc, dep->number, true); 1184 goto out1; 1185 } 1186 dev_err(dwc->dev, "request %p was not queued to %s\n", 1187 request, ep->name); 1188 ret = -EINVAL; 1189 goto out0; 1190 } 1191 1192 out1: 1193 /* giveback the request */ 1194 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1195 1196 out0: 1197 spin_unlock_irqrestore(&dwc->lock, flags); 1198 1199 return ret; 1200 } 1201 1202 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1203 { 1204 struct dwc3_gadget_ep_cmd_params params; 1205 struct dwc3 *dwc = dep->dwc; 1206 int ret; 1207 1208 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1209 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1210 return -EINVAL; 1211 } 1212 1213 memset(&params, 0x00, sizeof(params)); 1214 1215 if (value) { 1216 struct dwc3_trb *trb; 1217 1218 unsigned transfer_in_flight; 1219 unsigned started; 1220 1221 if (dep->number > 1) 1222 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1223 else 1224 trb = &dwc->ep0_trb[dep->trb_enqueue]; 1225 1226 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; 1227 started = !list_empty(&dep->started_list); 1228 1229 if (!protocol && ((dep->direction && transfer_in_flight) || 1230 (!dep->direction && started))) { 1231 dwc3_trace(trace_dwc3_gadget, 1232 "%s: pending request, cannot halt", 1233 dep->name); 1234 return -EAGAIN; 1235 } 1236 1237 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, 1238 &params); 1239 if (ret) 1240 dev_err(dwc->dev, "failed to set STALL on %s\n", 1241 dep->name); 1242 else 1243 dep->flags |= DWC3_EP_STALL; 1244 } else { 1245 1246 ret = dwc3_send_clear_stall_ep_cmd(dep); 1247 if (ret) 1248 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1249 dep->name); 1250 else 1251 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1252 } 1253 1254 return ret; 1255 } 1256 1257 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1258 { 1259 struct dwc3_ep *dep = to_dwc3_ep(ep); 1260 struct dwc3 *dwc = dep->dwc; 1261 1262 unsigned long flags; 1263
1264 int ret; 1265 1266 spin_lock_irqsave(&dwc->lock, flags); 1267 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1268 spin_unlock_irqrestore(&dwc->lock, flags); 1269 1270 return ret; 1271 } 1272 1273 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1274 { 1275 struct dwc3_ep *dep = to_dwc3_ep(ep); 1276 struct dwc3 *dwc = dep->dwc; 1277 unsigned long flags; 1278 int ret; 1279 1280 spin_lock_irqsave(&dwc->lock, flags); 1281 dep->flags |= DWC3_EP_WEDGE; 1282 1283 if (dep->number == 0 || dep->number == 1) 1284 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1285 else 1286 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1287 spin_unlock_irqrestore(&dwc->lock, flags); 1288 1289 return ret; 1290 } 1291 1292 /* -------------------------------------------------------------------------- */ 1293 1294 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1295 .bLength = USB_DT_ENDPOINT_SIZE, 1296 .bDescriptorType = USB_DT_ENDPOINT, 1297 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1298 }; 1299 1300 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1301 .enable = dwc3_gadget_ep0_enable, 1302 .disable = dwc3_gadget_ep0_disable, 1303 .alloc_request = dwc3_gadget_ep_alloc_request, 1304 .free_request = dwc3_gadget_ep_free_request, 1305 .queue = dwc3_gadget_ep0_queue, 1306 .dequeue = dwc3_gadget_ep_dequeue, 1307 .set_halt = dwc3_gadget_ep0_set_halt, 1308 .set_wedge = dwc3_gadget_ep_set_wedge, 1309 }; 1310 1311 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1312 .enable = dwc3_gadget_ep_enable, 1313 .disable = dwc3_gadget_ep_disable, 1314 .alloc_request = dwc3_gadget_ep_alloc_request, 1315 .free_request = dwc3_gadget_ep_free_request, 1316 .queue = dwc3_gadget_ep_queue, 1317 .dequeue = dwc3_gadget_ep_dequeue, 1318 .set_halt = dwc3_gadget_ep_set_halt, 1319 .set_wedge = dwc3_gadget_ep_set_wedge, 1320 }; 1321 1322 /* -------------------------------------------------------------------------- */ 1323 1324 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1325 { 1326 struct dwc3 *dwc = gadget_to_dwc(g); 1327 u32 reg; 1328 1329 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1330 return DWC3_DSTS_SOFFN(reg); 1331 } 1332 1333 static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1334 { 1335 int retries; 1336 1337 int ret; 1338 u32 reg; 1339 1340 u8 link_state; 1341 u8 speed; 1342 1343 /* 1344 * According to the Databook Remote wakeup request should 1345 * be issued only when the device is in early suspend state. 1346 * 1347 * We can check that via USB Link State bits in DSTS register. 
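 *
 * Concretely (mirroring the switch statement below): the only link
 * states accepted here are RX_DET (early suspend, in HS terms) and U3
 * (suspend); any other state fails with -EINVAL.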
1348 */ 1349 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1350 1351 speed = reg & DWC3_DSTS_CONNECTSPD; 1352 if ((speed == DWC3_DSTS_SUPERSPEED) || 1353 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { 1354 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed"); 1355 return 0; 1356 } 1357 1358 link_state = DWC3_DSTS_USBLNKST(reg); 1359 1360 switch (link_state) { 1361 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1362 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1363 break; 1364 default: 1365 dwc3_trace(trace_dwc3_gadget, 1366 "can't wakeup from '%s'", 1367 dwc3_gadget_link_string(link_state)); 1368 return -EINVAL; 1369 } 1370 1371 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1372 if (ret < 0) { 1373 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1374 return ret; 1375 } 1376 1377 /* Recent versions do this automatically */ 1378 if (dwc->revision < DWC3_REVISION_194A) { 1379 /* write zeroes to Link Change Request */ 1380 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1381 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1382 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1383 } 1384 1385 /* poll until Link State changes to ON */ 1386 retries = 20000; 1387 1388 while (retries--) { 1389 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1390 1391 /* in HS, means ON */ 1392 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1393 break; 1394 } 1395 1396 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1397 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1398 return -EINVAL; 1399 } 1400 1401 return 0; 1402 } 1403 1404 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1405 { 1406 struct dwc3 *dwc = gadget_to_dwc(g); 1407 unsigned long flags; 1408 int ret; 1409 1410 spin_lock_irqsave(&dwc->lock, flags); 1411 ret = __dwc3_gadget_wakeup(dwc); 1412 spin_unlock_irqrestore(&dwc->lock, flags); 1413 1414 return ret; 1415 } 1416 1417 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1418 int is_selfpowered) 1419 { 1420 struct dwc3 *dwc = gadget_to_dwc(g); 1421 unsigned long flags; 1422 1423 spin_lock_irqsave(&dwc->lock, flags); 1424 g->is_selfpowered = !!is_selfpowered; 1425 spin_unlock_irqrestore(&dwc->lock, flags); 1426 1427 return 0; 1428 } 1429 1430 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1431 { 1432 u32 reg; 1433 u32 timeout = 500; 1434 1435 if (pm_runtime_suspended(dwc->dev)) 1436 return 0; 1437 1438 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1439 if (is_on) { 1440 if (dwc->revision <= DWC3_REVISION_187A) { 1441 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1442 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1443 } 1444 1445 if (dwc->revision >= DWC3_REVISION_194A) 1446 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1447 reg |= DWC3_DCTL_RUN_STOP; 1448 1449 if (dwc->has_hibernation) 1450 reg |= DWC3_DCTL_KEEP_CONNECT; 1451 1452 dwc->pullups_connected = true; 1453 } else { 1454 reg &= ~DWC3_DCTL_RUN_STOP; 1455 1456 if (dwc->has_hibernation && !suspend) 1457 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1458 1459 dwc->pullups_connected = false; 1460 } 1461 1462 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1463 1464 do { 1465 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1466 reg &= DWC3_DSTS_DEVCTRLHLT; 1467 } while (--timeout && !(!is_on ^ !reg)); 1468 1469 if (!timeout) 1470 return -ETIMEDOUT; 1471 1472 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1473 dwc->gadget_driver 1474 ? dwc->gadget_driver->function : "no-function", 1475 is_on ? 
"connect" : "disconnect"); 1476 1477 return 0; 1478 } 1479 1480 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1481 { 1482 struct dwc3 *dwc = gadget_to_dwc(g); 1483 unsigned long flags; 1484 int ret; 1485 1486 is_on = !!is_on; 1487 1488 spin_lock_irqsave(&dwc->lock, flags); 1489 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1490 spin_unlock_irqrestore(&dwc->lock, flags); 1491 1492 return ret; 1493 } 1494 1495 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1496 { 1497 u32 reg; 1498 1499 /* Enable all but Start and End of Frame IRQs */ 1500 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1501 DWC3_DEVTEN_EVNTOVERFLOWEN | 1502 DWC3_DEVTEN_CMDCMPLTEN | 1503 DWC3_DEVTEN_ERRTICERREN | 1504 DWC3_DEVTEN_WKUPEVTEN | 1505 DWC3_DEVTEN_ULSTCNGEN | 1506 DWC3_DEVTEN_CONNECTDONEEN | 1507 DWC3_DEVTEN_USBRSTEN | 1508 DWC3_DEVTEN_DISCONNEVTEN); 1509 1510 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1511 } 1512 1513 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1514 { 1515 /* mask all interrupts */ 1516 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1517 } 1518 1519 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1520 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1521 1522 /** 1523 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG 1524 * dwc: pointer to our context structure 1525 * 1526 * The following looks like complex but it's actually very simple. In order to 1527 * calculate the number of packets we can burst at once on OUT transfers, we're 1528 * gonna use RxFIFO size. 1529 * 1530 * To calculate RxFIFO size we need two numbers: 1531 * MDWIDTH = size, in bits, of the internal memory bus 1532 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) 1533 * 1534 * Given these two numbers, the formula is simple: 1535 * 1536 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; 1537 * 1538 * 24 bytes is for 3x SETUP packets 1539 * 16 bytes is a clock domain crossing tolerance 1540 * 1541 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; 1542 */ 1543 static void dwc3_gadget_setup_nump(struct dwc3 *dwc) 1544 { 1545 u32 ram2_depth; 1546 u32 mdwidth; 1547 u32 nump; 1548 u32 reg; 1549 1550 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); 1551 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); 1552 1553 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; 1554 nump = min_t(u32, nump, 16); 1555 1556 /* update NumP */ 1557 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1558 reg &= ~DWC3_DCFG_NUMP_MASK; 1559 reg |= nump << DWC3_DCFG_NUMP_SHIFT; 1560 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1561 } 1562 1563 static int __dwc3_gadget_start(struct dwc3 *dwc) 1564 { 1565 struct dwc3_ep *dep; 1566 int ret = 0; 1567 u32 reg; 1568 1569 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1570 reg &= ~(DWC3_DCFG_SPEED_MASK); 1571 1572 /** 1573 * WORKAROUND: DWC3 revision < 2.20a have an issue 1574 * which would cause metastability state on Run/Stop 1575 * bit if we try to force the IP to USB2-only mode. 
1576 * 1577 * Because of that, we cannot configure the IP to any 1578 * speed other than the SuperSpeed 1579 * 1580 * Refers to: 1581 * 1582 * STAR#9000525659: Clock Domain Crossing on DCTL in 1583 * USB 2.0 Mode 1584 */ 1585 if (dwc->revision < DWC3_REVISION_220A) { 1586 reg |= DWC3_DCFG_SUPERSPEED; 1587 } else { 1588 switch (dwc->maximum_speed) { 1589 case USB_SPEED_LOW: 1590 reg |= DWC3_DCFG_LOWSPEED; 1591 break; 1592 case USB_SPEED_FULL: 1593 reg |= DWC3_DCFG_FULLSPEED1; 1594 break; 1595 case USB_SPEED_HIGH: 1596 reg |= DWC3_DCFG_HIGHSPEED; 1597 break; 1598 case USB_SPEED_SUPER_PLUS: 1599 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 1600 break; 1601 default: 1602 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", 1603 dwc->maximum_speed); 1604 /* fall through */ 1605 case USB_SPEED_SUPER: 1606 reg |= DWC3_DCFG_SUPERSPEED; 1607 break; 1608 } 1609 } 1610 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1611 1612 /* 1613 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1614 * field instead of letting dwc3 itself calculate that automatically. 1615 * 1616 * This way, we maximize the chances that we'll be able to get several 1617 * bursts of data without going through any sort of endpoint throttling. 1618 */ 1619 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1620 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1621 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1622 1623 dwc3_gadget_setup_nump(dwc); 1624 1625 /* Start with SuperSpeed Default */ 1626 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1627 1628 dep = dwc->eps[0]; 1629 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1630 false); 1631 if (ret) { 1632 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1633 goto err0; 1634 } 1635 1636 dep = dwc->eps[1]; 1637 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1638 false); 1639 if (ret) { 1640 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1641 goto err1; 1642 } 1643 1644 /* begin to receive SETUP packets */ 1645 dwc->ep0state = EP0_SETUP_PHASE; 1646 dwc3_ep0_out_start(dwc); 1647 1648 dwc3_gadget_enable_irq(dwc); 1649 1650 return 0; 1651 1652 err1: 1653 __dwc3_gadget_ep_disable(dwc->eps[0]); 1654 1655 err0: 1656 return ret; 1657 } 1658 1659 static int dwc3_gadget_start(struct usb_gadget *g, 1660 struct usb_gadget_driver *driver) 1661 { 1662 struct dwc3 *dwc = gadget_to_dwc(g); 1663 unsigned long flags; 1664 int ret = 0; 1665 int irq; 1666 1667 irq = dwc->irq_gadget; 1668 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1669 IRQF_SHARED, "dwc3", dwc->ev_buf); 1670 if (ret) { 1671 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1672 irq, ret); 1673 goto err0; 1674 } 1675 1676 spin_lock_irqsave(&dwc->lock, flags); 1677 if (dwc->gadget_driver) { 1678 dev_err(dwc->dev, "%s is already bound to %s\n", 1679 dwc->gadget.name, 1680 dwc->gadget_driver->driver.name); 1681 ret = -EBUSY; 1682 goto err1; 1683 } 1684 1685 dwc->gadget_driver = driver; 1686 1687 if (pm_runtime_active(dwc->dev)) 1688 __dwc3_gadget_start(dwc); 1689 1690 spin_unlock_irqrestore(&dwc->lock, flags); 1691 1692 return 0; 1693 1694 err1: 1695 spin_unlock_irqrestore(&dwc->lock, flags); 1696 free_irq(irq, dwc); 1697 1698 err0: 1699 return ret; 1700 } 1701 1702 static void __dwc3_gadget_stop(struct dwc3 *dwc) 1703 { 1704 if (pm_runtime_suspended(dwc->dev)) 1705 return; 1706 1707 dwc3_gadget_disable_irq(dwc); 1708 __dwc3_gadget_ep_disable(dwc->eps[0]); 1709 __dwc3_gadget_ep_disable(dwc->eps[1]); 1710 } 1711 1712 static int dwc3_gadget_stop(struct 
usb_gadget *g) 1713 { 1714 struct dwc3 *dwc = gadget_to_dwc(g); 1715 unsigned long flags; 1716 1717 spin_lock_irqsave(&dwc->lock, flags); 1718 __dwc3_gadget_stop(dwc); 1719 dwc->gadget_driver = NULL; 1720 spin_unlock_irqrestore(&dwc->lock, flags); 1721 1722 free_irq(dwc->irq_gadget, dwc->ev_buf); 1723 1724 return 0; 1725 } 1726 1727 static const struct usb_gadget_ops dwc3_gadget_ops = { 1728 .get_frame = dwc3_gadget_get_frame, 1729 .wakeup = dwc3_gadget_wakeup, 1730 .set_selfpowered = dwc3_gadget_set_selfpowered, 1731 .pullup = dwc3_gadget_pullup, 1732 .udc_start = dwc3_gadget_start, 1733 .udc_stop = dwc3_gadget_stop, 1734 }; 1735 1736 /* -------------------------------------------------------------------------- */ 1737 1738 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1739 u8 num, u32 direction) 1740 { 1741 struct dwc3_ep *dep; 1742 u8 i; 1743 1744 for (i = 0; i < num; i++) { 1745 u8 epnum = (i << 1) | (direction ? 1 : 0); 1746 1747 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1748 if (!dep) 1749 return -ENOMEM; 1750 1751 dep->dwc = dwc; 1752 dep->number = epnum; 1753 dep->direction = !!direction; 1754 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 1755 dwc->eps[epnum] = dep; 1756 1757 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1758 (epnum & 1) ? "in" : "out"); 1759 1760 dep->endpoint.name = dep->name; 1761 spin_lock_init(&dep->lock); 1762 1763 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1764 1765 if (epnum == 0 || epnum == 1) { 1766 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1767 dep->endpoint.maxburst = 1; 1768 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1769 if (!epnum) 1770 dwc->gadget.ep0 = &dep->endpoint; 1771 } else { 1772 int ret; 1773 1774 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1775 dep->endpoint.max_streams = 15; 1776 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1777 list_add_tail(&dep->endpoint.ep_list, 1778 &dwc->gadget.ep_list); 1779 1780 ret = dwc3_alloc_trb_pool(dep); 1781 if (ret) 1782 return ret; 1783 } 1784 1785 if (epnum == 0 || epnum == 1) { 1786 dep->endpoint.caps.type_control = true; 1787 } else { 1788 dep->endpoint.caps.type_iso = true; 1789 dep->endpoint.caps.type_bulk = true; 1790 dep->endpoint.caps.type_int = true; 1791 } 1792 1793 dep->endpoint.caps.dir_in = !!direction; 1794 dep->endpoint.caps.dir_out = !direction; 1795 1796 INIT_LIST_HEAD(&dep->pending_list); 1797 INIT_LIST_HEAD(&dep->started_list); 1798 } 1799 1800 return 0; 1801 } 1802 1803 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1804 { 1805 int ret; 1806 1807 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1808 1809 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1810 if (ret < 0) { 1811 dwc3_trace(trace_dwc3_gadget, 1812 "failed to allocate OUT endpoints"); 1813 return ret; 1814 } 1815 1816 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1817 if (ret < 0) { 1818 dwc3_trace(trace_dwc3_gadget, 1819 "failed to allocate IN endpoints"); 1820 return ret; 1821 } 1822 1823 return 0; 1824 } 1825 1826 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1827 { 1828 struct dwc3_ep *dep; 1829 u8 epnum; 1830 1831 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1832 dep = dwc->eps[epnum]; 1833 if (!dep) 1834 continue; 1835 /* 1836 * Physical endpoints 0 and 1 are special; they form the 1837 * bi-directional USB endpoint 0. 1838 * 1839 * For those two physical endpoints, we don't allocate a TRB 1840 * pool nor do we add them to the endpoints list.
Due to that, we 1841 * shouldn't do these two operations otherwise we would end up 1842 * with all sorts of bugs when removing dwc3.ko. 1843 */ 1844 if (epnum != 0 && epnum != 1) { 1845 dwc3_free_trb_pool(dep); 1846 list_del(&dep->endpoint.ep_list); 1847 } 1848 1849 kfree(dep); 1850 } 1851 } 1852 1853 /* -------------------------------------------------------------------------- */ 1854 1855 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1856 struct dwc3_request *req, struct dwc3_trb *trb, 1857 const struct dwc3_event_depevt *event, int status, 1858 int chain) 1859 { 1860 unsigned int count; 1861 unsigned int s_pkt = 0; 1862 unsigned int trb_status; 1863 1864 dep->queued_requests--; 1865 dwc3_ep_inc_deq(dep); 1866 trace_dwc3_complete_trb(dep, trb); 1867 1868 /* 1869 * If we're in the middle of a series of chained TRBs and we 1870 * receive a short transfer along the way, DWC3 will skip 1871 * through all TRBs including the last TRB in the chain (the one 1872 * where the CHN bit is zero). DWC3 will also avoid clearing the HWO 1873 * bit and SW has to do it manually. 1874 * 1875 * We're going to do that here to avoid problems of HW trying 1876 * to use bogus TRBs for transfers. 1877 */ 1878 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) 1879 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 1880 1881 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1882 return 1; 1883 1884 count = trb->size & DWC3_TRB_SIZE_MASK; 1885 req->request.actual += count; 1886 1887 if (dep->direction) { 1888 if (count) { 1889 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1890 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1891 dwc3_trace(trace_dwc3_gadget, 1892 "%s: incomplete IN transfer", 1893 dep->name); 1894 /* 1895 * If missed isoc occurred and there is 1896 * no request queued then issue END 1897 * TRANSFER, so that core generates 1898 * next xfernotready and we will issue 1899 * a fresh START TRANSFER. 1900 * If there are still queued requests 1901 * then wait, do not issue either END 1902 * or UPDATE TRANSFER, just attach next 1903 * request in pending_list during 1904 * giveback. If any future queued request 1905 * is successfully transferred then we 1906 * will issue UPDATE TRANSFER for all 1907 * requests in the pending_list.
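 *
 * In short, the policy implemented here and in
 * dwc3_cleanup_done_reqs() is (sketch):
 *
 *	missed isoc, nothing left queued   -> issue END TRANSFER and wait
 *					      for a fresh XferNotReady
 *	missed isoc, requests still queued -> wait; the next request is
 *					      attached during giveback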
1908 */ 1909 dep->flags |= DWC3_EP_MISSED_ISOC; 1910 } else { 1911 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1912 dep->name); 1913 status = -ECONNRESET; 1914 } 1915 } else { 1916 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1917 } 1918 } else { 1919 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1920 s_pkt = 1; 1921 } 1922 1923 if (s_pkt && !chain) 1924 return 1; 1925 1926 if ((event->status & DEPEVT_STATUS_IOC) && 1927 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1928 return 1; 1929 1930 return 0; 1931 } 1932 1933 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1934 const struct dwc3_event_depevt *event, int status) 1935 { 1936 struct dwc3_request *req, *n; 1937 struct dwc3_trb *trb; 1938 bool ioc = false; 1939 int ret; 1940 1941 list_for_each_entry_safe(req, n, &dep->started_list, list) { 1942 unsigned length; 1943 unsigned actual; 1944 int chain; 1945 1946 length = req->request.length; 1947 chain = req->num_pending_sgs > 0; 1948 if (chain) { 1949 struct scatterlist *sg = req->sg; 1950 struct scatterlist *s; 1951 unsigned int pending = req->num_pending_sgs; 1952 unsigned int i; 1953 1954 for_each_sg(sg, s, pending, i) { 1955 trb = &dep->trb_pool[dep->trb_dequeue]; 1956 1957 req->sg = sg_next(s); 1958 req->num_pending_sgs--; 1959 1960 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1961 event, status, chain); 1962 if (ret) 1963 break; 1964 } 1965 } else { 1966 trb = &dep->trb_pool[dep->trb_dequeue]; 1967 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1968 event, status, chain); 1969 } 1970 1971 /* 1972 * We assume here we will always receive the entire data block 1973 * which we should receive. Meaning, if we program RX to 1974 * receive 4K but we receive only 2K, we assume that's all we 1975 * should receive and we simply bounce the request back to the 1976 * gadget driver for further processing. 1977 */ 1978 actual = length - req->request.actual; 1979 req->request.actual = actual; 1980 1981 if (ret && chain && (actual < length) && req->num_pending_sgs) 1982 return __dwc3_gadget_kick_transfer(dep, 0); 1983 1984 dwc3_gadget_giveback(dep, req, status); 1985 1986 if (ret) { 1987 if ((event->status & DEPEVT_STATUS_IOC) && 1988 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1989 ioc = true; 1990 break; 1991 } 1992 } 1993 1994 /* 1995 * Our endpoint might get disabled by another thread during 1996 * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 1997 * early on so DWC3_EP_BUSY flag gets cleared 1998 */ 1999 if (!dep->endpoint.desc) 2000 return 1; 2001 2002 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 2003 list_empty(&dep->started_list)) { 2004 if (list_empty(&dep->pending_list)) { 2005 /* 2006 * If there is no entry in request list then do 2007 * not issue END TRANSFER now. Just set PENDING 2008 * flag, so that END TRANSFER is issued when an 2009 * entry is added into request list. 
2010 */ 2011 dep->flags = DWC3_EP_PENDING_REQUEST; 2012 } else { 2013 dwc3_stop_active_transfer(dwc, dep->number, true); 2014 dep->flags = DWC3_EP_ENABLED; 2015 } 2016 return 1; 2017 } 2018 2019 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc) 2020 return 0; 2021 2022 return 1; 2023 } 2024 2025 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 2026 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 2027 { 2028 unsigned status = 0; 2029 int clean_busy; 2030 u32 is_xfer_complete; 2031 2032 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 2033 2034 if (event->status & DEPEVT_STATUS_BUSERR) 2035 status = -ECONNRESET; 2036 2037 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 2038 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete || 2039 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 2040 dep->flags &= ~DWC3_EP_BUSY; 2041 2042 /* 2043 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2044 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 2045 */ 2046 if (dwc->revision < DWC3_REVISION_183A) { 2047 u32 reg; 2048 int i; 2049 2050 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2051 dep = dwc->eps[i]; 2052 2053 if (!(dep->flags & DWC3_EP_ENABLED)) 2054 continue; 2055 2056 if (!list_empty(&dep->started_list)) 2057 return; 2058 } 2059 2060 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2061 reg |= dwc->u1u2; 2062 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2063 2064 dwc->u1u2 = 0; 2065 } 2066 2067 /* 2068 * Our endpoint might get disabled by another thread during 2069 * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 2070 * early on so DWC3_EP_BUSY flag gets cleared 2071 */ 2072 if (!dep->endpoint.desc) 2073 return; 2074 2075 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2076 int ret; 2077 2078 ret = __dwc3_gadget_kick_transfer(dep, 0); 2079 if (!ret || ret == -EBUSY) 2080 return; 2081 } 2082 } 2083 2084 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2085 const struct dwc3_event_depevt *event) 2086 { 2087 struct dwc3_ep *dep; 2088 u8 epnum = event->endpoint_number; 2089 2090 dep = dwc->eps[epnum]; 2091 2092 if (!(dep->flags & DWC3_EP_ENABLED)) 2093 return; 2094 2095 if (epnum == 0 || epnum == 1) { 2096 dwc3_ep0_interrupt(dwc, event); 2097 return; 2098 } 2099 2100 switch (event->endpoint_event) { 2101 case DWC3_DEPEVT_XFERCOMPLETE: 2102 dep->resource_index = 0; 2103 2104 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2105 dwc3_trace(trace_dwc3_gadget, 2106 "%s is an Isochronous endpoint", 2107 dep->name); 2108 return; 2109 } 2110 2111 dwc3_endpoint_transfer_complete(dwc, dep, event); 2112 break; 2113 case DWC3_DEPEVT_XFERINPROGRESS: 2114 dwc3_endpoint_transfer_complete(dwc, dep, event); 2115 break; 2116 case DWC3_DEPEVT_XFERNOTREADY: 2117 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2118 dwc3_gadget_start_isoc(dwc, dep, event); 2119 } else { 2120 int active; 2121 int ret; 2122 2123 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; 2124 2125 dwc3_trace(trace_dwc3_gadget, "%s: reason %s", 2126 dep->name, active ? 
"Transfer Active" 2127 : "Transfer Not Active"); 2128 2129 ret = __dwc3_gadget_kick_transfer(dep, 0); 2130 if (!ret || ret == -EBUSY) 2131 return; 2132 2133 dwc3_trace(trace_dwc3_gadget, 2134 "%s: failed to kick transfers", 2135 dep->name); 2136 } 2137 2138 break; 2139 case DWC3_DEPEVT_STREAMEVT: 2140 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2141 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2142 dep->name); 2143 return; 2144 } 2145 2146 switch (event->status) { 2147 case DEPEVT_STREAMEVT_FOUND: 2148 dwc3_trace(trace_dwc3_gadget, 2149 "Stream %d found and started", 2150 event->parameters); 2151 2152 break; 2153 case DEPEVT_STREAMEVT_NOTFOUND: 2154 /* FALLTHROUGH */ 2155 default: 2156 dwc3_trace(trace_dwc3_gadget, 2157 "unable to find suitable stream"); 2158 } 2159 break; 2160 case DWC3_DEPEVT_RXTXFIFOEVT: 2161 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name); 2162 break; 2163 case DWC3_DEPEVT_EPCMDCMPLT: 2164 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); 2165 break; 2166 } 2167 } 2168 2169 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2170 { 2171 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2172 spin_unlock(&dwc->lock); 2173 dwc->gadget_driver->disconnect(&dwc->gadget); 2174 spin_lock(&dwc->lock); 2175 } 2176 } 2177 2178 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2179 { 2180 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2181 spin_unlock(&dwc->lock); 2182 dwc->gadget_driver->suspend(&dwc->gadget); 2183 spin_lock(&dwc->lock); 2184 } 2185 } 2186 2187 static void dwc3_resume_gadget(struct dwc3 *dwc) 2188 { 2189 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2190 spin_unlock(&dwc->lock); 2191 dwc->gadget_driver->resume(&dwc->gadget); 2192 spin_lock(&dwc->lock); 2193 } 2194 } 2195 2196 static void dwc3_reset_gadget(struct dwc3 *dwc) 2197 { 2198 if (!dwc->gadget_driver) 2199 return; 2200 2201 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2202 spin_unlock(&dwc->lock); 2203 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2204 spin_lock(&dwc->lock); 2205 } 2206 } 2207 2208 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2209 { 2210 struct dwc3_ep *dep; 2211 struct dwc3_gadget_ep_cmd_params params; 2212 u32 cmd; 2213 int ret; 2214 2215 dep = dwc->eps[epnum]; 2216 2217 if (!dep->resource_index) 2218 return; 2219 2220 /* 2221 * NOTICE: We are violating what the Databook says about the 2222 * EndTransfer command. Ideally we would _always_ wait for the 2223 * EndTransfer Command Completion IRQ, but that's causing too 2224 * much trouble synchronizing between us and gadget driver. 2225 * 2226 * We have discussed this with the IP Provider and it was 2227 * suggested to giveback all requests here, but give HW some 2228 * extra time to synchronize with the interconnect. We're using 2229 * an arbitrary 100us delay for that. 2230 * 2231 * Note also that a similar handling was tested by Synopsys 2232 * (thanks a lot Paul) and nothing bad has come out of it. 2233 * In short, what we're doing is: 2234 * 2235 * - Issue EndTransfer WITH CMDIOC bit set 2236 * - Wait 100us 2237 * 2238 * As of IP version 3.10a of the DWC_usb3 IP, the controller 2239 * supports a mode to work around the above limitation. The 2240 * software can poll the CMDACT bit in the DEPCMD register 2241 * after issuing a EndTransfer command. This mode is enabled 2242 * by writing GUCTL2[14]. 
	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;

	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
		udelay(100);
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

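/*
 * dwc3_gadget_disconnect_interrupt - handle the Disconnect device event
 *
 * Stops initiating U1/U2, notifies the gadget driver and marks the device
 * as not attached.
 */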
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. That flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS, but I don't know why we would want
	 * to do this. Maybe it becomes part of the power saving plan.
	 */

	if ((speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

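/*
 * dwc3_gadget_conndone_interrupt - handle the Connection Done device event
 *
 * Reads the negotiated connection speed from DSTS, programs ep0's
 * wMaxPacketSize accordingly, optionally enables USB2 LPM, and re-enables
 * both physical ep0 endpoints.
 */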
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED2:
	case DWC3_DSTS_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DSTS_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * For dwc3 revisions >= 2.40a with the LPM Erratum enabled
		 * and DCFG.LPMCap set, the core responds with an ACK when the
		 * BESL value in the LPM token is less than or equal to the
		 * LPM NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO: take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

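/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event
 *
 * Applies the pre-2.50a and pre-1.83a workarounds described below, then
 * forwards U1/U2/U3 entry and Resume to the gadget driver's suspend/resume
 * callbacks.
 */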
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
	 * Hibernation mode enabled, have an issue which shows up when the
	 * device detects a host-initiated U3 exit.
	 *
	 * In that case, the device will generate a Link State Change
	 * Interrupt from U3 to RESUME which is only necessary if Hibernation
	 * is configured in.
	 *
	 * There are no functional changes due to such a spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dwc3_trace(trace_dwc3_gadget,
					"ignoring transition U3 -> Resume");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 revisions < 1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
		dwc3_suspend_gadget(dwc);

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, the core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

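/*
 * dwc3_gadget_interrupt - dispatch a device-specific event
 *
 * Routes each device event (disconnect, reset, connection done, wakeup,
 * hibernation request, link state change, suspend, ...) to its handler.
 */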
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		/* This event was repurposed as a Suspend event for 2.30a and above */
		if (dwc->revision < DWC3_REVISION_230A) {
			dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
		} else {
			dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");

			/*
			 * Ignore suspend event until the gadget enters the
			 * USB_STATE_CONFIGURED state.
			 */
			if (dwc->gadget.state >= USB_STATE_CONFIGURED)
				dwc3_gadget_suspend_interrupt(dwc,
						event->event_info);
		}
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dwc3_trace(trace_dwc3_gadget, "Command Complete");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dwc3_trace(trace_dwc3_gadget, "Overflow");
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		dwc3_endpoint_interrupt(dwc, &event->depevt);
		return;
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

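/*
 * dwc3_process_event_buf - drain one event buffer
 *
 * Consumes pending entries 4 bytes at a time, acknowledging each one in
 * GEVNTCOUNT, then clears DWC3_EVENT_PENDING and unmasks the interrupt.
 * Called from the threaded interrupt handler with dwc->lock held.
 */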
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME: we wrap around correctly to the next entry as almost
		 * all entries are 4 bytes in size. There is one 12-byte entry
		 * which is a regular entry followed by 8 bytes of data. At
		 * the moment I don't know how things are organized if such an
		 * entry crosses the buffer boundary, so I'll worry about that
		 * once we try to handle it.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	return ret;
}

static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;
	struct dwc3 *dwc = evt->dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_process_event_buf(evt);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

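/*
 * dwc3_check_event_buf - hard IRQ top half
 *
 * If the controller is runtime suspended, records that events are pending
 * and defers processing. Otherwise it latches the event count, masks the
 * event interrupt and wakes the handler thread.
 */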
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 count;
	u32 reg;

	if (pm_runtime_suspended(dwc->dev)) {
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		dwc->pending_events = true;
		return IRQ_HANDLED;
	}

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;

	return dwc3_check_event_buf(evt);
}

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success, otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret, irq;
	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);

	irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
	if (irq == -EPROBE_DEFER)
		return irq;

	if (irq <= 0) {
		irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
		if (irq == -EPROBE_DEFER)
			return irq;

		if (irq <= 0) {
			irq = platform_get_irq(dwc3_pdev, 0);
			if (irq <= 0) {
				if (irq != -EPROBE_DEFER) {
					dev_err(dwc->dev,
						"missing peripheral IRQ\n");
				}
				if (!irq)
					irq = -EINVAL;
				return irq;
			}
		}
	}

	dwc->irq_gadget = irq;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!dwc->zlp_buf) {
		ret = -ENOMEM;
		goto err4;
	}

	dwc->gadget.ops = &dwc3_gadget_ops;
	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported = true;
	dwc->gadget.name = "dwc3-gadget";
	dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;

	/*
	 * FIXME: We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which means we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing
	 * this to happen so we avoid sending a SuperSpeed Capability
	 * descriptor together with our BOS descriptor as that could confuse
	 * the host into thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (dwc->revision < DWC3_REVISION_220A)
		dwc3_trace(trace_dwc3_gadget,
				"Changing max_speed on rev %08x",
				dwc->revision);

	dwc->gadget.max_speed = dwc->maximum_speed;

	/*
	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
	 * on ep out.
	 */
	dwc->gadget.quirk_ep_out_aligned_size = true;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err5;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err5;
	}

	return 0;

err5:
	kfree(dwc->zlp_buf);

err4:
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

err3:
	kfree(dwc->setup_buf);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);
	kfree(dwc->zlp_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	int ret;

	if (!dwc->gadget_driver)
		return 0;

	ret = dwc3_gadget_run_stop(dwc, false, false);
	if (ret < 0)
		return ret;

	dwc3_disconnect_gadget(dwc);
	__dwc3_gadget_stop(dwc);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	int ret;

	if (!dwc->gadget_driver)
		return 0;

	ret = __dwc3_gadget_start(dwc);
	if (ret < 0)
		goto err0;

	ret = dwc3_gadget_run_stop(dwc, true, false);
	if (ret < 0)
		goto err1;

	return 0;

err1:
	__dwc3_gadget_stop(dwc);

err0:
	return ret;
}

void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
	if (dwc->pending_events) {
		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
		dwc->pending_events = false;
		enable_irq(dwc->irq_gadget);
	}
}