1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 return -ETIMEDOUT; 143 } 144 145 /** 146 * dwc3_ep_inc_trb() - Increment a TRB index. 147 * @index - Pointer to the TRB index to increment. 148 * 149 * The index should never point to the link TRB. After incrementing, 150 * if it is point to the link TRB, wrap around to the beginning. The 151 * link TRB is always at the last TRB entry. 152 */ 153 static void dwc3_ep_inc_trb(u8 *index) 154 { 155 (*index)++; 156 if (*index == (DWC3_TRB_NUM - 1)) 157 *index = 0; 158 } 159 160 static void dwc3_ep_inc_enq(struct dwc3_ep *dep) 161 { 162 dwc3_ep_inc_trb(&dep->trb_enqueue); 163 } 164 165 static void dwc3_ep_inc_deq(struct dwc3_ep *dep) 166 { 167 dwc3_ep_inc_trb(&dep->trb_dequeue); 168 } 169 170 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 171 int status) 172 { 173 struct dwc3 *dwc = dep->dwc; 174 175 req->started = false; 176 list_del(&req->list); 177 req->trb = NULL; 178 req->remaining = 0; 179 180 if (req->request.status == -EINPROGRESS) 181 req->request.status = status; 182 183 if (dwc->ep0_bounced && dep->number <= 1) 184 dwc->ep0_bounced = false; 185 186 usb_gadget_unmap_request_by_dev(dwc->sysdev, 187 &req->request, req->direction); 188 189 trace_dwc3_gadget_giveback(req); 190 191 spin_unlock(&dwc->lock); 192 usb_gadget_giveback_request(&dep->endpoint, &req->request); 193 spin_lock(&dwc->lock); 194 195 if (dep->number > 1) 196 pm_runtime_put(dwc->dev); 197 } 198 199 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 200 { 201 u32 timeout = 500; 202 int status = 0; 203 int ret = 0; 204 u32 reg; 205 206 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 207 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 208 209 do { 210 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 211 if (!(reg & DWC3_DGCMD_CMDACT)) { 212 status = DWC3_DGCMD_STATUS(reg); 213 if (status) 214 ret = -EINVAL; 215 break; 216 } 217 } while (--timeout); 218 219 if (!timeout) { 220 ret = -ETIMEDOUT; 221 status = -ETIMEDOUT; 222 } 223 224 trace_dwc3_gadget_generic_cmd(cmd, param, status); 225 226 return ret; 227 } 228 229 static int __dwc3_gadget_wakeup(struct dwc3 *dwc); 230 231 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, 232 struct dwc3_gadget_ep_cmd_params *params) 233 { 234 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 235 struct dwc3 *dwc = dep->dwc; 236 u32 timeout = 500; 237 u32 reg; 238 239 int cmd_status = 0; 240 int susphy = false; 241 int ret = -EINVAL; 242 243 /* 244 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if 245 * we're issuing an endpoint command, we must check if 246 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it. 
247 * 248 * We will also set SUSPHY bit to what it was before returning as stated 249 * by the same section on Synopsys databook. 250 */ 251 if (dwc->gadget.speed <= USB_SPEED_HIGH) { 252 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 253 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 254 susphy = true; 255 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 256 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 257 } 258 } 259 260 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 261 int needs_wakeup; 262 263 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || 264 dwc->link_state == DWC3_LINK_STATE_U2 || 265 dwc->link_state == DWC3_LINK_STATE_U3); 266 267 if (unlikely(needs_wakeup)) { 268 ret = __dwc3_gadget_wakeup(dwc); 269 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 270 ret); 271 } 272 } 273 274 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 275 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 276 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 277 278 /* 279 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're 280 * not relying on XferNotReady, we can make use of a special "No 281 * Response Update Transfer" command where we should clear both CmdAct 282 * and CmdIOC bits. 283 * 284 * With this, we don't need to wait for command completion and can 285 * straight away issue further commands to the endpoint. 286 * 287 * NOTICE: We're making an assumption that control endpoints will never 288 * make use of Update Transfer command. This is a safe assumption 289 * because we can never have more than one request at a time with 290 * Control Endpoints. If anybody changes that assumption, this chunk 291 * needs to be updated accordingly. 292 */ 293 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && 294 !usb_endpoint_xfer_isoc(desc)) 295 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); 296 else 297 cmd |= DWC3_DEPCMD_CMDACT; 298 299 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); 300 do { 301 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 302 if (!(reg & DWC3_DEPCMD_CMDACT)) { 303 cmd_status = DWC3_DEPCMD_STATUS(reg); 304 305 switch (cmd_status) { 306 case 0: 307 ret = 0; 308 break; 309 case DEPEVT_TRANSFER_NO_RESOURCE: 310 ret = -EINVAL; 311 break; 312 case DEPEVT_TRANSFER_BUS_EXPIRY: 313 /* 314 * SW issues START TRANSFER command to 315 * isochronous ep with future frame interval. If 316 * future interval time has already passed when 317 * core receives the command, it will respond 318 * with an error status of 'Bus Expiry'. 319 * 320 * Instead of always returning -EINVAL, let's 321 * give a hint to the gadget driver that this is 322 * the case by returning -EAGAIN. 
323 */ 324 ret = -EAGAIN; 325 break; 326 default: 327 dev_WARN(dwc->dev, "UNKNOWN cmd status\n"); 328 } 329 330 break; 331 } 332 } while (--timeout); 333 334 if (timeout == 0) { 335 ret = -ETIMEDOUT; 336 cmd_status = -ETIMEDOUT; 337 } 338 339 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); 340 341 if (ret == 0) { 342 switch (DWC3_DEPCMD_CMD(cmd)) { 343 case DWC3_DEPCMD_STARTTRANSFER: 344 dep->flags |= DWC3_EP_TRANSFER_STARTED; 345 break; 346 case DWC3_DEPCMD_ENDTRANSFER: 347 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 348 break; 349 default: 350 /* nothing */ 351 break; 352 } 353 } 354 355 if (unlikely(susphy)) { 356 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 357 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 358 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 359 } 360 361 return ret; 362 } 363 364 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) 365 { 366 struct dwc3 *dwc = dep->dwc; 367 struct dwc3_gadget_ep_cmd_params params; 368 u32 cmd = DWC3_DEPCMD_CLEARSTALL; 369 370 /* 371 * As of core revision 2.60a the recommended programming model 372 * is to set the ClearPendIN bit when issuing a Clear Stall EP 373 * command for IN endpoints. This is to prevent an issue where 374 * some (non-compliant) hosts may not send ACK TPs for pending 375 * IN transfers due to a mishandled error condition. Synopsys 376 * STAR 9000614252. 377 */ 378 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) && 379 (dwc->gadget.speed >= USB_SPEED_SUPER)) 380 cmd |= DWC3_DEPCMD_CLEARPENDIN; 381 382 memset(¶ms, 0, sizeof(params)); 383 384 return dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 385 } 386 387 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 388 struct dwc3_trb *trb) 389 { 390 u32 offset = (char *) trb - (char *) dep->trb_pool; 391 392 return dep->trb_pool_dma + offset; 393 } 394 395 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 396 { 397 struct dwc3 *dwc = dep->dwc; 398 399 if (dep->trb_pool) 400 return 0; 401 402 dep->trb_pool = dma_alloc_coherent(dwc->sysdev, 403 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 404 &dep->trb_pool_dma, GFP_KERNEL); 405 if (!dep->trb_pool) { 406 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 407 dep->name); 408 return -ENOMEM; 409 } 410 411 return 0; 412 } 413 414 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 415 { 416 struct dwc3 *dwc = dep->dwc; 417 418 dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 419 dep->trb_pool, dep->trb_pool_dma); 420 421 dep->trb_pool = NULL; 422 dep->trb_pool_dma = 0; 423 } 424 425 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); 426 427 /** 428 * dwc3_gadget_start_config - Configure EP resources 429 * @dwc: pointer to our controller context structure 430 * @dep: endpoint that is being enabled 431 * 432 * The assignment of transfer resources cannot perfectly follow the 433 * data book due to the fact that the controller driver does not have 434 * all knowledge of the configuration in advance. It is given this 435 * information piecemeal by the composite gadget framework after every 436 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook 437 * programming model in this scenario can cause errors. For two 438 * reasons: 439 * 440 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION 441 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of 442 * multiple interfaces. 443 * 444 * 2) The databook does not mention doing more DEPXFERCFG for new 445 * endpoint on alt setting (8.1.6). 
446 * 447 * The following simplified method is used instead: 448 * 449 * All hardware endpoints can be assigned a transfer resource and this 450 * setting will stay persistent until either a core reset or 451 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and 452 * do DEPXFERCFG for every hardware endpoint as well. We are 453 * guaranteed that there are as many transfer resources as endpoints. 454 * 455 * This function is called for each endpoint when it is being enabled 456 * but is triggered only when called for EP0-out, which always happens 457 * first, and which should only happen in one of the above conditions. 458 */ 459 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 460 { 461 struct dwc3_gadget_ep_cmd_params params; 462 u32 cmd; 463 int i; 464 int ret; 465 466 if (dep->number) 467 return 0; 468 469 memset(¶ms, 0x00, sizeof(params)); 470 cmd = DWC3_DEPCMD_DEPSTARTCFG; 471 472 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 473 if (ret) 474 return ret; 475 476 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 477 struct dwc3_ep *dep = dwc->eps[i]; 478 479 if (!dep) 480 continue; 481 482 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 483 if (ret) 484 return ret; 485 } 486 487 return 0; 488 } 489 490 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 491 bool modify, bool restore) 492 { 493 const struct usb_ss_ep_comp_descriptor *comp_desc; 494 const struct usb_endpoint_descriptor *desc; 495 struct dwc3_gadget_ep_cmd_params params; 496 497 if (dev_WARN_ONCE(dwc->dev, modify && restore, 498 "Can't modify and restore\n")) 499 return -EINVAL; 500 501 comp_desc = dep->endpoint.comp_desc; 502 desc = dep->endpoint.desc; 503 504 memset(¶ms, 0x00, sizeof(params)); 505 506 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 507 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 508 509 /* Burst size is only needed in SuperSpeed mode */ 510 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 511 u32 burst = dep->endpoint.maxburst; 512 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); 513 } 514 515 if (modify) { 516 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY; 517 } else if (restore) { 518 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 519 params.param2 |= dep->saved_state; 520 } else { 521 params.param0 |= DWC3_DEPCFG_ACTION_INIT; 522 } 523 524 if (usb_endpoint_xfer_control(desc)) 525 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; 526 527 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc)) 528 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN; 529 530 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 531 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 532 | DWC3_DEPCFG_STREAM_EVENT_EN; 533 dep->stream_capable = true; 534 } 535 536 if (!usb_endpoint_xfer_control(desc)) 537 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 538 539 /* 540 * We are doing 1:1 mapping for endpoints, meaning 541 * Physical Endpoints 2 maps to Logical Endpoint 2 and 542 * so on. We consider the direction bit as part of the physical 543 * endpoint number. So USB endpoint 0x81 is 0x03. 
544 */ 545 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 546 547 /* 548 * We must use the lower 16 TX FIFOs even though 549 * HW might have more 550 */ 551 if (dep->direction) 552 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 553 554 if (desc->bInterval) { 555 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 556 dep->interval = 1 << (desc->bInterval - 1); 557 } 558 559 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); 560 } 561 562 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 563 { 564 struct dwc3_gadget_ep_cmd_params params; 565 566 memset(¶ms, 0x00, sizeof(params)); 567 568 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 569 570 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 571 ¶ms); 572 } 573 574 /** 575 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 576 * @dep: endpoint to be initialized 577 * @desc: USB Endpoint Descriptor 578 * 579 * Caller should take care of locking 580 */ 581 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 582 bool modify, bool restore) 583 { 584 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 585 struct dwc3 *dwc = dep->dwc; 586 587 u32 reg; 588 int ret; 589 590 if (!(dep->flags & DWC3_EP_ENABLED)) { 591 ret = dwc3_gadget_start_config(dwc, dep); 592 if (ret) 593 return ret; 594 } 595 596 ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore); 597 if (ret) 598 return ret; 599 600 if (!(dep->flags & DWC3_EP_ENABLED)) { 601 struct dwc3_trb *trb_st_hw; 602 struct dwc3_trb *trb_link; 603 604 dep->type = usb_endpoint_type(desc); 605 dep->flags |= DWC3_EP_ENABLED; 606 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 607 608 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 609 reg |= DWC3_DALEPENA_EP(dep->number); 610 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 611 612 init_waitqueue_head(&dep->wait_end_transfer); 613 614 if (usb_endpoint_xfer_control(desc)) 615 goto out; 616 617 /* Initialize the TRB ring */ 618 dep->trb_dequeue = 0; 619 dep->trb_enqueue = 0; 620 memset(dep->trb_pool, 0, 621 sizeof(struct dwc3_trb) * DWC3_TRB_NUM); 622 623 /* Link TRB. The HWO bit is never reset */ 624 trb_st_hw = &dep->trb_pool[0]; 625 626 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 627 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 628 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 629 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 630 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 631 } 632 633 /* 634 * Issue StartTransfer here with no-op TRB so we can always rely on No 635 * Response Update Transfer command. 
636 */ 637 if (usb_endpoint_xfer_bulk(desc)) { 638 struct dwc3_gadget_ep_cmd_params params; 639 struct dwc3_trb *trb; 640 dma_addr_t trb_dma; 641 u32 cmd; 642 643 memset(¶ms, 0, sizeof(params)); 644 trb = &dep->trb_pool[0]; 645 trb_dma = dwc3_trb_dma_offset(dep, trb); 646 647 params.param0 = upper_32_bits(trb_dma); 648 params.param1 = lower_32_bits(trb_dma); 649 650 cmd = DWC3_DEPCMD_STARTTRANSFER; 651 652 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 653 if (ret < 0) 654 return ret; 655 656 dep->flags |= DWC3_EP_BUSY; 657 658 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 659 WARN_ON_ONCE(!dep->resource_index); 660 } 661 662 663 out: 664 trace_dwc3_gadget_ep_enable(dep); 665 666 return 0; 667 } 668 669 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 670 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 671 { 672 struct dwc3_request *req; 673 674 dwc3_stop_active_transfer(dwc, dep->number, true); 675 676 /* - giveback all requests to gadget driver */ 677 while (!list_empty(&dep->started_list)) { 678 req = next_request(&dep->started_list); 679 680 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 681 } 682 683 while (!list_empty(&dep->pending_list)) { 684 req = next_request(&dep->pending_list); 685 686 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 687 } 688 } 689 690 /** 691 * __dwc3_gadget_ep_disable - Disables a HW endpoint 692 * @dep: the endpoint to disable 693 * 694 * This function also removes requests which are currently processed ny the 695 * hardware and those which are not yet scheduled. 696 * Caller should take care of locking. 697 */ 698 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 699 { 700 struct dwc3 *dwc = dep->dwc; 701 u32 reg; 702 703 trace_dwc3_gadget_ep_disable(dep); 704 705 dwc3_remove_requests(dwc, dep); 706 707 /* make sure HW endpoint isn't stalled */ 708 if (dep->flags & DWC3_EP_STALL) 709 __dwc3_gadget_ep_set_halt(dep, 0, false); 710 711 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 712 reg &= ~DWC3_DALEPENA_EP(dep->number); 713 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 714 715 dep->stream_capable = false; 716 dep->type = 0; 717 dep->flags &= DWC3_EP_END_TRANSFER_PENDING; 718 719 /* Clear out the ep descriptors for non-ep0 */ 720 if (dep->number > 1) { 721 dep->endpoint.comp_desc = NULL; 722 dep->endpoint.desc = NULL; 723 } 724 725 return 0; 726 } 727 728 /* -------------------------------------------------------------------------- */ 729 730 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 731 const struct usb_endpoint_descriptor *desc) 732 { 733 return -EINVAL; 734 } 735 736 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 737 { 738 return -EINVAL; 739 } 740 741 /* -------------------------------------------------------------------------- */ 742 743 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 744 const struct usb_endpoint_descriptor *desc) 745 { 746 struct dwc3_ep *dep; 747 struct dwc3 *dwc; 748 unsigned long flags; 749 int ret; 750 751 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 752 pr_debug("dwc3: invalid parameters\n"); 753 return -EINVAL; 754 } 755 756 if (!desc->wMaxPacketSize) { 757 pr_debug("dwc3: missing wMaxPacketSize\n"); 758 return -EINVAL; 759 } 760 761 dep = to_dwc3_ep(ep); 762 dwc = dep->dwc; 763 764 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 765 "%s is already enabled\n", 766 dep->name)) 767 return 0; 768 769 spin_lock_irqsave(&dwc->lock, flags); 770 ret = __dwc3_gadget_ep_enable(dep, false, false); 771 
spin_unlock_irqrestore(&dwc->lock, flags); 772 773 return ret; 774 } 775 776 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 777 { 778 struct dwc3_ep *dep; 779 struct dwc3 *dwc; 780 unsigned long flags; 781 int ret; 782 783 if (!ep) { 784 pr_debug("dwc3: invalid parameters\n"); 785 return -EINVAL; 786 } 787 788 dep = to_dwc3_ep(ep); 789 dwc = dep->dwc; 790 791 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 792 "%s is already disabled\n", 793 dep->name)) 794 return 0; 795 796 spin_lock_irqsave(&dwc->lock, flags); 797 ret = __dwc3_gadget_ep_disable(dep); 798 spin_unlock_irqrestore(&dwc->lock, flags); 799 800 return ret; 801 } 802 803 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 804 gfp_t gfp_flags) 805 { 806 struct dwc3_request *req; 807 struct dwc3_ep *dep = to_dwc3_ep(ep); 808 809 req = kzalloc(sizeof(*req), gfp_flags); 810 if (!req) 811 return NULL; 812 813 req->epnum = dep->number; 814 req->dep = dep; 815 816 dep->allocated_requests++; 817 818 trace_dwc3_alloc_request(req); 819 820 return &req->request; 821 } 822 823 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 824 struct usb_request *request) 825 { 826 struct dwc3_request *req = to_dwc3_request(request); 827 struct dwc3_ep *dep = to_dwc3_ep(ep); 828 829 dep->allocated_requests--; 830 trace_dwc3_free_request(req); 831 kfree(req); 832 } 833 834 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep); 835 836 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, 837 dma_addr_t dma, unsigned length, unsigned chain, unsigned node, 838 unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt) 839 { 840 struct dwc3 *dwc = dep->dwc; 841 struct usb_gadget *gadget = &dwc->gadget; 842 enum usb_device_speed speed = gadget->speed; 843 844 dwc3_ep_inc_enq(dep); 845 846 trb->size = DWC3_TRB_SIZE_LENGTH(length); 847 trb->bpl = lower_32_bits(dma); 848 trb->bph = upper_32_bits(dma); 849 850 switch (usb_endpoint_type(dep->endpoint.desc)) { 851 case USB_ENDPOINT_XFER_CONTROL: 852 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 853 break; 854 855 case USB_ENDPOINT_XFER_ISOC: 856 if (!node) { 857 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 858 859 if (speed == USB_SPEED_HIGH) { 860 struct usb_ep *ep = &dep->endpoint; 861 trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); 862 } 863 } else { 864 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 865 } 866 867 /* always enable Interrupt on Missed ISOC */ 868 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 869 break; 870 871 case USB_ENDPOINT_XFER_BULK: 872 case USB_ENDPOINT_XFER_INT: 873 trb->ctrl = DWC3_TRBCTL_NORMAL; 874 break; 875 default: 876 /* 877 * This is only possible with faulty memory because we 878 * checked it already :) 879 */ 880 dev_WARN(dwc->dev, "Unknown endpoint type %d\n", 881 usb_endpoint_type(dep->endpoint.desc)); 882 } 883 884 /* always enable Continue on Short Packet */ 885 if (usb_endpoint_dir_out(dep->endpoint.desc)) { 886 trb->ctrl |= DWC3_TRB_CTRL_CSP; 887 888 if (short_not_ok) 889 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 890 } 891 892 if ((!no_interrupt && !chain) || 893 (dwc3_calc_trbs_left(dep) == 0)) 894 trb->ctrl |= DWC3_TRB_CTRL_IOC; 895 896 if (chain) 897 trb->ctrl |= DWC3_TRB_CTRL_CHN; 898 899 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 900 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); 901 902 trb->ctrl |= DWC3_TRB_CTRL_HWO; 903 904 trace_dwc3_prepare_trb(dep, trb); 905 } 906 907 /** 908 * dwc3_prepare_one_trb - setup one TRB from one request 909 * @dep: endpoint for which this request is prepared 910 
* @req: dwc3_request pointer 911 * @chain: should this TRB be chained to the next? 912 * @node: only for isochronous endpoints. First TRB needs different type. 913 */ 914 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 915 struct dwc3_request *req, unsigned chain, unsigned node) 916 { 917 struct dwc3_trb *trb; 918 unsigned length = req->request.length; 919 unsigned stream_id = req->request.stream_id; 920 unsigned short_not_ok = req->request.short_not_ok; 921 unsigned no_interrupt = req->request.no_interrupt; 922 dma_addr_t dma = req->request.dma; 923 924 trb = &dep->trb_pool[dep->trb_enqueue]; 925 926 if (!req->trb) { 927 dwc3_gadget_move_started_request(req); 928 req->trb = trb; 929 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 930 dep->queued_requests++; 931 } 932 933 __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, 934 stream_id, short_not_ok, no_interrupt); 935 } 936 937 /** 938 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring 939 * @dep: The endpoint with the TRB ring 940 * @index: The index of the current TRB in the ring 941 * 942 * Returns the TRB prior to the one pointed to by the index. If the 943 * index is 0, we will wrap backwards, skip the link TRB, and return 944 * the one just before that. 945 */ 946 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) 947 { 948 u8 tmp = index; 949 950 if (!tmp) 951 tmp = DWC3_TRB_NUM - 1; 952 953 return &dep->trb_pool[tmp - 1]; 954 } 955 956 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) 957 { 958 struct dwc3_trb *tmp; 959 struct dwc3 *dwc = dep->dwc; 960 u8 trbs_left; 961 962 /* 963 * If enqueue & dequeue are equal than it is either full or empty. 964 * 965 * One way to know for sure is if the TRB right before us has HWO bit 966 * set or not. If it has, then we're definitely full and can't fit any 967 * more transfers in our ring. 
968 */ 969 if (dep->trb_enqueue == dep->trb_dequeue) { 970 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 971 if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO, 972 "%s No TRBS left\n", dep->name)) 973 return 0; 974 975 return DWC3_TRB_NUM - 1; 976 } 977 978 trbs_left = dep->trb_dequeue - dep->trb_enqueue; 979 trbs_left &= (DWC3_TRB_NUM - 1); 980 981 if (dep->trb_dequeue < dep->trb_enqueue) 982 trbs_left--; 983 984 return trbs_left; 985 } 986 987 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, 988 struct dwc3_request *req) 989 { 990 struct scatterlist *sg = req->sg; 991 struct scatterlist *s; 992 int i; 993 994 for_each_sg(sg, s, req->num_pending_sgs, i) { 995 unsigned int length = req->request.length; 996 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 997 unsigned int rem = length % maxp; 998 unsigned chain = true; 999 1000 if (sg_is_last(s)) 1001 chain = false; 1002 1003 if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { 1004 struct dwc3 *dwc = dep->dwc; 1005 struct dwc3_trb *trb; 1006 1007 req->unaligned = true; 1008 1009 /* prepare normal TRB */ 1010 dwc3_prepare_one_trb(dep, req, true, i); 1011 1012 /* Now prepare one extra TRB to align transfer size */ 1013 trb = &dep->trb_pool[dep->trb_enqueue]; 1014 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 1015 maxp - rem, false, 0, 1016 req->request.stream_id, 1017 req->request.short_not_ok, 1018 req->request.no_interrupt); 1019 } else { 1020 dwc3_prepare_one_trb(dep, req, chain, i); 1021 } 1022 1023 if (!dwc3_calc_trbs_left(dep)) 1024 break; 1025 } 1026 } 1027 1028 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, 1029 struct dwc3_request *req) 1030 { 1031 unsigned int length = req->request.length; 1032 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1033 unsigned int rem = length % maxp; 1034 1035 if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { 1036 struct dwc3 *dwc = dep->dwc; 1037 struct dwc3_trb *trb; 1038 1039 req->unaligned = true; 1040 1041 /* prepare normal TRB */ 1042 dwc3_prepare_one_trb(dep, req, true, 0); 1043 1044 /* Now prepare one extra TRB to align transfer size */ 1045 trb = &dep->trb_pool[dep->trb_enqueue]; 1046 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, 1047 false, 0, req->request.stream_id, 1048 req->request.short_not_ok, 1049 req->request.no_interrupt); 1050 } else { 1051 dwc3_prepare_one_trb(dep, req, false, 0); 1052 } 1053 } 1054 1055 /* 1056 * dwc3_prepare_trbs - setup TRBs from requests 1057 * @dep: endpoint for which requests are being prepared 1058 * 1059 * The function goes through the requests list and sets up TRBs for the 1060 * transfers. The function returns once there are no more TRBs available or 1061 * it runs out of requests. 1062 */ 1063 static void dwc3_prepare_trbs(struct dwc3_ep *dep) 1064 { 1065 struct dwc3_request *req, *n; 1066 1067 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 1068 1069 if (!dwc3_calc_trbs_left(dep)) 1070 return; 1071 1072 /* 1073 * We can get in a situation where there's a request in the started list 1074 * but there weren't enough TRBs to fully kick it in the first time 1075 * around, so it has been waiting for more TRBs to be freed up. 1076 * 1077 * In that case, we should check if we have a request with pending_sgs 1078 * in the started list and prepare TRBs for that request first, 1079 * otherwise we will prepare TRBs completely out of order and that will 1080 * break things. 
1081 */ 1082 list_for_each_entry(req, &dep->started_list, list) { 1083 if (req->num_pending_sgs > 0) 1084 dwc3_prepare_one_trb_sg(dep, req); 1085 1086 if (!dwc3_calc_trbs_left(dep)) 1087 return; 1088 } 1089 1090 list_for_each_entry_safe(req, n, &dep->pending_list, list) { 1091 if (req->num_pending_sgs > 0) 1092 dwc3_prepare_one_trb_sg(dep, req); 1093 else 1094 dwc3_prepare_one_trb_linear(dep, req); 1095 1096 if (!dwc3_calc_trbs_left(dep)) 1097 return; 1098 } 1099 } 1100 1101 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) 1102 { 1103 struct dwc3_gadget_ep_cmd_params params; 1104 struct dwc3_request *req; 1105 int starting; 1106 int ret; 1107 u32 cmd; 1108 1109 starting = !(dep->flags & DWC3_EP_BUSY); 1110 1111 dwc3_prepare_trbs(dep); 1112 req = next_request(&dep->started_list); 1113 if (!req) { 1114 dep->flags |= DWC3_EP_PENDING_REQUEST; 1115 return 0; 1116 } 1117 1118 memset(¶ms, 0, sizeof(params)); 1119 1120 if (starting) { 1121 params.param0 = upper_32_bits(req->trb_dma); 1122 params.param1 = lower_32_bits(req->trb_dma); 1123 cmd = DWC3_DEPCMD_STARTTRANSFER | 1124 DWC3_DEPCMD_PARAM(cmd_param); 1125 } else { 1126 cmd = DWC3_DEPCMD_UPDATETRANSFER | 1127 DWC3_DEPCMD_PARAM(dep->resource_index); 1128 } 1129 1130 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1131 if (ret < 0) { 1132 /* 1133 * FIXME we need to iterate over the list of requests 1134 * here and stop, unmap, free and del each of the linked 1135 * requests instead of what we do now. 1136 */ 1137 if (req->trb) 1138 memset(req->trb, 0, sizeof(struct dwc3_trb)); 1139 dep->queued_requests--; 1140 dwc3_gadget_giveback(dep, req, ret); 1141 return ret; 1142 } 1143 1144 dep->flags |= DWC3_EP_BUSY; 1145 1146 if (starting) { 1147 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 1148 WARN_ON_ONCE(!dep->resource_index); 1149 } 1150 1151 return 0; 1152 } 1153 1154 static int __dwc3_gadget_get_frame(struct dwc3 *dwc) 1155 { 1156 u32 reg; 1157 1158 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1159 return DWC3_DSTS_SOFFN(reg); 1160 } 1161 1162 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1163 struct dwc3_ep *dep, u32 cur_uf) 1164 { 1165 u32 uf; 1166 1167 if (list_empty(&dep->pending_list)) { 1168 dev_info(dwc->dev, "%s: ran out of requests\n", 1169 dep->name); 1170 dep->flags |= DWC3_EP_PENDING_REQUEST; 1171 return; 1172 } 1173 1174 /* 4 micro frames in the future */ 1175 uf = cur_uf + dep->interval * 4; 1176 1177 __dwc3_gadget_kick_transfer(dep, uf); 1178 } 1179 1180 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1181 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1182 { 1183 u32 cur_uf, mask; 1184 1185 mask = ~(dep->interval - 1); 1186 cur_uf = event->parameters & mask; 1187 1188 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1189 } 1190 1191 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1192 { 1193 struct dwc3 *dwc = dep->dwc; 1194 int ret; 1195 1196 if (!dep->endpoint.desc) { 1197 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", 1198 dep->name); 1199 return -ESHUTDOWN; 1200 } 1201 1202 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1203 &req->request, req->dep->name)) { 1204 dev_err(dwc->dev, "%s: request %p belongs to '%s'\n", 1205 dep->name, &req->request, req->dep->name); 1206 return -EINVAL; 1207 } 1208 1209 pm_runtime_get(dwc->dev); 1210 1211 req->request.actual = 0; 1212 req->request.status = -EINPROGRESS; 1213 req->direction = dep->direction; 1214 req->epnum = dep->number; 1215 1216 trace_dwc3_ep_queue(req); 1217 1218 
ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, 1219 dep->direction); 1220 if (ret) 1221 return ret; 1222 1223 req->sg = req->request.sg; 1224 req->num_pending_sgs = req->request.num_mapped_sgs; 1225 1226 list_add_tail(&req->list, &dep->pending_list); 1227 1228 /* 1229 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must 1230 * wait for a XferNotReady event so we will know what's the current 1231 * (micro-)frame number. 1232 * 1233 * Without this trick, we are very, very likely gonna get Bus Expiry 1234 * errors which will force us issue EndTransfer command. 1235 */ 1236 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1237 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { 1238 if (dep->flags & DWC3_EP_TRANSFER_STARTED) { 1239 dwc3_stop_active_transfer(dwc, dep->number, true); 1240 dep->flags = DWC3_EP_ENABLED; 1241 } else { 1242 u32 cur_uf; 1243 1244 cur_uf = __dwc3_gadget_get_frame(dwc); 1245 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1246 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1247 } 1248 } 1249 return 0; 1250 } 1251 1252 if (!dwc3_calc_trbs_left(dep)) 1253 return 0; 1254 1255 ret = __dwc3_gadget_kick_transfer(dep, 0); 1256 if (ret == -EBUSY) 1257 ret = 0; 1258 1259 return ret; 1260 } 1261 1262 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep, 1263 struct usb_request *request) 1264 { 1265 dwc3_gadget_ep_free_request(ep, request); 1266 } 1267 1268 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) 1269 { 1270 struct dwc3_request *req; 1271 struct usb_request *request; 1272 struct usb_ep *ep = &dep->endpoint; 1273 1274 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 1275 if (!request) 1276 return -ENOMEM; 1277 1278 request->length = 0; 1279 request->buf = dwc->zlp_buf; 1280 request->complete = __dwc3_gadget_ep_zlp_complete; 1281 1282 req = to_dwc3_request(request); 1283 1284 return __dwc3_gadget_ep_queue(dep, req); 1285 } 1286 1287 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1288 gfp_t gfp_flags) 1289 { 1290 struct dwc3_request *req = to_dwc3_request(request); 1291 struct dwc3_ep *dep = to_dwc3_ep(ep); 1292 struct dwc3 *dwc = dep->dwc; 1293 1294 unsigned long flags; 1295 1296 int ret; 1297 1298 spin_lock_irqsave(&dwc->lock, flags); 1299 ret = __dwc3_gadget_ep_queue(dep, req); 1300 1301 /* 1302 * Okay, here's the thing, if gadget driver has requested for a ZLP by 1303 * setting request->zero, instead of doing magic, we will just queue an 1304 * extra usb_request ourselves so that it gets handled the same way as 1305 * any other request. 
1306 */ 1307 if (ret == 0 && request->zero && request->length && 1308 (request->length % ep->maxpacket == 0)) 1309 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep); 1310 1311 spin_unlock_irqrestore(&dwc->lock, flags); 1312 1313 return ret; 1314 } 1315 1316 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1317 struct usb_request *request) 1318 { 1319 struct dwc3_request *req = to_dwc3_request(request); 1320 struct dwc3_request *r = NULL; 1321 1322 struct dwc3_ep *dep = to_dwc3_ep(ep); 1323 struct dwc3 *dwc = dep->dwc; 1324 1325 unsigned long flags; 1326 int ret = 0; 1327 1328 trace_dwc3_ep_dequeue(req); 1329 1330 spin_lock_irqsave(&dwc->lock, flags); 1331 1332 list_for_each_entry(r, &dep->pending_list, list) { 1333 if (r == req) 1334 break; 1335 } 1336 1337 if (r != req) { 1338 list_for_each_entry(r, &dep->started_list, list) { 1339 if (r == req) 1340 break; 1341 } 1342 if (r == req) { 1343 /* wait until it is processed */ 1344 dwc3_stop_active_transfer(dwc, dep->number, true); 1345 goto out1; 1346 } 1347 dev_err(dwc->dev, "request %p was not queued to %s\n", 1348 request, ep->name); 1349 ret = -EINVAL; 1350 goto out0; 1351 } 1352 1353 out1: 1354 /* giveback the request */ 1355 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1356 1357 out0: 1358 spin_unlock_irqrestore(&dwc->lock, flags); 1359 1360 return ret; 1361 } 1362 1363 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1364 { 1365 struct dwc3_gadget_ep_cmd_params params; 1366 struct dwc3 *dwc = dep->dwc; 1367 int ret; 1368 1369 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1370 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1371 return -EINVAL; 1372 } 1373 1374 memset(¶ms, 0x00, sizeof(params)); 1375 1376 if (value) { 1377 struct dwc3_trb *trb; 1378 1379 unsigned transfer_in_flight; 1380 unsigned started; 1381 1382 if (dep->flags & DWC3_EP_STALL) 1383 return 0; 1384 1385 if (dep->number > 1) 1386 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1387 else 1388 trb = &dwc->ep0_trb[dep->trb_enqueue]; 1389 1390 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; 1391 started = !list_empty(&dep->started_list); 1392 1393 if (!protocol && ((dep->direction && transfer_in_flight) || 1394 (!dep->direction && started))) { 1395 return -EAGAIN; 1396 } 1397 1398 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, 1399 ¶ms); 1400 if (ret) 1401 dev_err(dwc->dev, "failed to set STALL on %s\n", 1402 dep->name); 1403 else 1404 dep->flags |= DWC3_EP_STALL; 1405 } else { 1406 if (!(dep->flags & DWC3_EP_STALL)) 1407 return 0; 1408 1409 ret = dwc3_send_clear_stall_ep_cmd(dep); 1410 if (ret) 1411 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1412 dep->name); 1413 else 1414 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1415 } 1416 1417 return ret; 1418 } 1419 1420 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1421 { 1422 struct dwc3_ep *dep = to_dwc3_ep(ep); 1423 struct dwc3 *dwc = dep->dwc; 1424 1425 unsigned long flags; 1426 1427 int ret; 1428 1429 spin_lock_irqsave(&dwc->lock, flags); 1430 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1431 spin_unlock_irqrestore(&dwc->lock, flags); 1432 1433 return ret; 1434 } 1435 1436 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1437 { 1438 struct dwc3_ep *dep = to_dwc3_ep(ep); 1439 struct dwc3 *dwc = dep->dwc; 1440 unsigned long flags; 1441 int ret; 1442 1443 spin_lock_irqsave(&dwc->lock, flags); 1444 dep->flags |= DWC3_EP_WEDGE; 1445 1446 if (dep->number == 0 || dep->number == 1) 1447 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1448 else 
1449 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1450 spin_unlock_irqrestore(&dwc->lock, flags); 1451 1452 return ret; 1453 } 1454 1455 /* -------------------------------------------------------------------------- */ 1456 1457 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1458 .bLength = USB_DT_ENDPOINT_SIZE, 1459 .bDescriptorType = USB_DT_ENDPOINT, 1460 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1461 }; 1462 1463 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1464 .enable = dwc3_gadget_ep0_enable, 1465 .disable = dwc3_gadget_ep0_disable, 1466 .alloc_request = dwc3_gadget_ep_alloc_request, 1467 .free_request = dwc3_gadget_ep_free_request, 1468 .queue = dwc3_gadget_ep0_queue, 1469 .dequeue = dwc3_gadget_ep_dequeue, 1470 .set_halt = dwc3_gadget_ep0_set_halt, 1471 .set_wedge = dwc3_gadget_ep_set_wedge, 1472 }; 1473 1474 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1475 .enable = dwc3_gadget_ep_enable, 1476 .disable = dwc3_gadget_ep_disable, 1477 .alloc_request = dwc3_gadget_ep_alloc_request, 1478 .free_request = dwc3_gadget_ep_free_request, 1479 .queue = dwc3_gadget_ep_queue, 1480 .dequeue = dwc3_gadget_ep_dequeue, 1481 .set_halt = dwc3_gadget_ep_set_halt, 1482 .set_wedge = dwc3_gadget_ep_set_wedge, 1483 }; 1484 1485 /* -------------------------------------------------------------------------- */ 1486 1487 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1488 { 1489 struct dwc3 *dwc = gadget_to_dwc(g); 1490 1491 return __dwc3_gadget_get_frame(dwc); 1492 } 1493 1494 static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1495 { 1496 int retries; 1497 1498 int ret; 1499 u32 reg; 1500 1501 u8 link_state; 1502 u8 speed; 1503 1504 /* 1505 * According to the Databook Remote wakeup request should 1506 * be issued only when the device is in early suspend state. 1507 * 1508 * We can check that via USB Link State bits in DSTS register. 
1509 */ 1510 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1511 1512 speed = reg & DWC3_DSTS_CONNECTSPD; 1513 if ((speed == DWC3_DSTS_SUPERSPEED) || 1514 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) 1515 return 0; 1516 1517 link_state = DWC3_DSTS_USBLNKST(reg); 1518 1519 switch (link_state) { 1520 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1521 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1522 break; 1523 default: 1524 return -EINVAL; 1525 } 1526 1527 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1528 if (ret < 0) { 1529 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1530 return ret; 1531 } 1532 1533 /* Recent versions do this automatically */ 1534 if (dwc->revision < DWC3_REVISION_194A) { 1535 /* write zeroes to Link Change Request */ 1536 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1537 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1538 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1539 } 1540 1541 /* poll until Link State changes to ON */ 1542 retries = 20000; 1543 1544 while (retries--) { 1545 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1546 1547 /* in HS, means ON */ 1548 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1549 break; 1550 } 1551 1552 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1553 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1554 return -EINVAL; 1555 } 1556 1557 return 0; 1558 } 1559 1560 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1561 { 1562 struct dwc3 *dwc = gadget_to_dwc(g); 1563 unsigned long flags; 1564 int ret; 1565 1566 spin_lock_irqsave(&dwc->lock, flags); 1567 ret = __dwc3_gadget_wakeup(dwc); 1568 spin_unlock_irqrestore(&dwc->lock, flags); 1569 1570 return ret; 1571 } 1572 1573 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1574 int is_selfpowered) 1575 { 1576 struct dwc3 *dwc = gadget_to_dwc(g); 1577 unsigned long flags; 1578 1579 spin_lock_irqsave(&dwc->lock, flags); 1580 g->is_selfpowered = !!is_selfpowered; 1581 spin_unlock_irqrestore(&dwc->lock, flags); 1582 1583 return 0; 1584 } 1585 1586 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1587 { 1588 u32 reg; 1589 u32 timeout = 500; 1590 1591 if (pm_runtime_suspended(dwc->dev)) 1592 return 0; 1593 1594 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1595 if (is_on) { 1596 if (dwc->revision <= DWC3_REVISION_187A) { 1597 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1598 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1599 } 1600 1601 if (dwc->revision >= DWC3_REVISION_194A) 1602 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1603 reg |= DWC3_DCTL_RUN_STOP; 1604 1605 if (dwc->has_hibernation) 1606 reg |= DWC3_DCTL_KEEP_CONNECT; 1607 1608 dwc->pullups_connected = true; 1609 } else { 1610 reg &= ~DWC3_DCTL_RUN_STOP; 1611 1612 if (dwc->has_hibernation && !suspend) 1613 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1614 1615 dwc->pullups_connected = false; 1616 } 1617 1618 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1619 1620 do { 1621 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1622 reg &= DWC3_DSTS_DEVCTRLHLT; 1623 } while (--timeout && !(!is_on ^ !reg)); 1624 1625 if (!timeout) 1626 return -ETIMEDOUT; 1627 1628 return 0; 1629 } 1630 1631 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1632 { 1633 struct dwc3 *dwc = gadget_to_dwc(g); 1634 unsigned long flags; 1635 int ret; 1636 1637 is_on = !!is_on; 1638 1639 /* 1640 * Per databook, when we want to stop the gadget, if a control transfer 1641 * is still in process, complete it and get the core into setup phase. 
1642 */ 1643 if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { 1644 reinit_completion(&dwc->ep0_in_setup); 1645 1646 ret = wait_for_completion_timeout(&dwc->ep0_in_setup, 1647 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); 1648 if (ret == 0) { 1649 dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); 1650 return -ETIMEDOUT; 1651 } 1652 } 1653 1654 spin_lock_irqsave(&dwc->lock, flags); 1655 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1656 spin_unlock_irqrestore(&dwc->lock, flags); 1657 1658 return ret; 1659 } 1660 1661 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1662 { 1663 u32 reg; 1664 1665 /* Enable all but Start and End of Frame IRQs */ 1666 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1667 DWC3_DEVTEN_EVNTOVERFLOWEN | 1668 DWC3_DEVTEN_CMDCMPLTEN | 1669 DWC3_DEVTEN_ERRTICERREN | 1670 DWC3_DEVTEN_WKUPEVTEN | 1671 DWC3_DEVTEN_CONNECTDONEEN | 1672 DWC3_DEVTEN_USBRSTEN | 1673 DWC3_DEVTEN_DISCONNEVTEN); 1674 1675 if (dwc->revision < DWC3_REVISION_250A) 1676 reg |= DWC3_DEVTEN_ULSTCNGEN; 1677 1678 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1679 } 1680 1681 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1682 { 1683 /* mask all interrupts */ 1684 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1685 } 1686 1687 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1688 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1689 1690 /** 1691 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG 1692 * dwc: pointer to our context structure 1693 * 1694 * The following looks like complex but it's actually very simple. In order to 1695 * calculate the number of packets we can burst at once on OUT transfers, we're 1696 * gonna use RxFIFO size. 1697 * 1698 * To calculate RxFIFO size we need two numbers: 1699 * MDWIDTH = size, in bits, of the internal memory bus 1700 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) 1701 * 1702 * Given these two numbers, the formula is simple: 1703 * 1704 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; 1705 * 1706 * 24 bytes is for 3x SETUP packets 1707 * 16 bytes is a clock domain crossing tolerance 1708 * 1709 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; 1710 */ 1711 static void dwc3_gadget_setup_nump(struct dwc3 *dwc) 1712 { 1713 u32 ram2_depth; 1714 u32 mdwidth; 1715 u32 nump; 1716 u32 reg; 1717 1718 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); 1719 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); 1720 1721 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; 1722 nump = min_t(u32, nump, 16); 1723 1724 /* update NumP */ 1725 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1726 reg &= ~DWC3_DCFG_NUMP_MASK; 1727 reg |= nump << DWC3_DCFG_NUMP_SHIFT; 1728 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1729 } 1730 1731 static int __dwc3_gadget_start(struct dwc3 *dwc) 1732 { 1733 struct dwc3_ep *dep; 1734 int ret = 0; 1735 u32 reg; 1736 1737 /* 1738 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if 1739 * the core supports IMOD, disable it. 1740 */ 1741 if (dwc->imod_interval) { 1742 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 1743 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 1744 } else if (dwc3_has_imod(dwc)) { 1745 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); 1746 } 1747 1748 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1749 reg &= ~(DWC3_DCFG_SPEED_MASK); 1750 1751 /** 1752 * WORKAROUND: DWC3 revision < 2.20a have an issue 1753 * which would cause metastability state on Run/Stop 1754 * bit if we try to force the IP to USB2-only mode. 
1755 * 1756 * Because of that, we cannot configure the IP to any 1757 * speed other than the SuperSpeed 1758 * 1759 * Refers to: 1760 * 1761 * STAR#9000525659: Clock Domain Crossing on DCTL in 1762 * USB 2.0 Mode 1763 */ 1764 if (dwc->revision < DWC3_REVISION_220A) { 1765 reg |= DWC3_DCFG_SUPERSPEED; 1766 } else { 1767 switch (dwc->maximum_speed) { 1768 case USB_SPEED_LOW: 1769 reg |= DWC3_DCFG_LOWSPEED; 1770 break; 1771 case USB_SPEED_FULL: 1772 reg |= DWC3_DCFG_FULLSPEED; 1773 break; 1774 case USB_SPEED_HIGH: 1775 reg |= DWC3_DCFG_HIGHSPEED; 1776 break; 1777 case USB_SPEED_SUPER_PLUS: 1778 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 1779 break; 1780 default: 1781 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", 1782 dwc->maximum_speed); 1783 /* fall through */ 1784 case USB_SPEED_SUPER: 1785 reg |= DWC3_DCFG_SUPERSPEED; 1786 break; 1787 } 1788 } 1789 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1790 1791 /* 1792 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1793 * field instead of letting dwc3 itself calculate that automatically. 1794 * 1795 * This way, we maximize the chances that we'll be able to get several 1796 * bursts of data without going through any sort of endpoint throttling. 1797 */ 1798 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1799 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1800 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1801 1802 dwc3_gadget_setup_nump(dwc); 1803 1804 /* Start with SuperSpeed Default */ 1805 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1806 1807 dep = dwc->eps[0]; 1808 ret = __dwc3_gadget_ep_enable(dep, false, false); 1809 if (ret) { 1810 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1811 goto err0; 1812 } 1813 1814 dep = dwc->eps[1]; 1815 ret = __dwc3_gadget_ep_enable(dep, false, false); 1816 if (ret) { 1817 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1818 goto err1; 1819 } 1820 1821 /* begin to receive SETUP packets */ 1822 dwc->ep0state = EP0_SETUP_PHASE; 1823 dwc3_ep0_out_start(dwc); 1824 1825 dwc3_gadget_enable_irq(dwc); 1826 1827 return 0; 1828 1829 err1: 1830 __dwc3_gadget_ep_disable(dwc->eps[0]); 1831 1832 err0: 1833 return ret; 1834 } 1835 1836 static int dwc3_gadget_start(struct usb_gadget *g, 1837 struct usb_gadget_driver *driver) 1838 { 1839 struct dwc3 *dwc = gadget_to_dwc(g); 1840 unsigned long flags; 1841 int ret = 0; 1842 int irq; 1843 1844 irq = dwc->irq_gadget; 1845 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1846 IRQF_SHARED, "dwc3", dwc->ev_buf); 1847 if (ret) { 1848 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1849 irq, ret); 1850 goto err0; 1851 } 1852 1853 spin_lock_irqsave(&dwc->lock, flags); 1854 if (dwc->gadget_driver) { 1855 dev_err(dwc->dev, "%s is already bound to %s\n", 1856 dwc->gadget.name, 1857 dwc->gadget_driver->driver.name); 1858 ret = -EBUSY; 1859 goto err1; 1860 } 1861 1862 dwc->gadget_driver = driver; 1863 1864 if (pm_runtime_active(dwc->dev)) 1865 __dwc3_gadget_start(dwc); 1866 1867 spin_unlock_irqrestore(&dwc->lock, flags); 1868 1869 return 0; 1870 1871 err1: 1872 spin_unlock_irqrestore(&dwc->lock, flags); 1873 free_irq(irq, dwc); 1874 1875 err0: 1876 return ret; 1877 } 1878 1879 static void __dwc3_gadget_stop(struct dwc3 *dwc) 1880 { 1881 dwc3_gadget_disable_irq(dwc); 1882 __dwc3_gadget_ep_disable(dwc->eps[0]); 1883 __dwc3_gadget_ep_disable(dwc->eps[1]); 1884 } 1885 1886 static int dwc3_gadget_stop(struct usb_gadget *g) 1887 { 1888 struct dwc3 *dwc = gadget_to_dwc(g); 1889 unsigned long flags; 1890 int epnum; 1891 1892 
spin_lock_irqsave(&dwc->lock, flags); 1893 1894 if (pm_runtime_suspended(dwc->dev)) 1895 goto out; 1896 1897 __dwc3_gadget_stop(dwc); 1898 1899 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1900 struct dwc3_ep *dep = dwc->eps[epnum]; 1901 1902 if (!dep) 1903 continue; 1904 1905 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 1906 continue; 1907 1908 wait_event_lock_irq(dep->wait_end_transfer, 1909 !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), 1910 dwc->lock); 1911 } 1912 1913 out: 1914 dwc->gadget_driver = NULL; 1915 spin_unlock_irqrestore(&dwc->lock, flags); 1916 1917 free_irq(dwc->irq_gadget, dwc->ev_buf); 1918 1919 return 0; 1920 } 1921 1922 static const struct usb_gadget_ops dwc3_gadget_ops = { 1923 .get_frame = dwc3_gadget_get_frame, 1924 .wakeup = dwc3_gadget_wakeup, 1925 .set_selfpowered = dwc3_gadget_set_selfpowered, 1926 .pullup = dwc3_gadget_pullup, 1927 .udc_start = dwc3_gadget_start, 1928 .udc_stop = dwc3_gadget_stop, 1929 }; 1930 1931 /* -------------------------------------------------------------------------- */ 1932 1933 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1934 u8 num, u32 direction) 1935 { 1936 struct dwc3_ep *dep; 1937 u8 i; 1938 1939 for (i = 0; i < num; i++) { 1940 u8 epnum = (i << 1) | (direction ? 1 : 0); 1941 1942 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1943 if (!dep) 1944 return -ENOMEM; 1945 1946 dep->dwc = dwc; 1947 dep->number = epnum; 1948 dep->direction = !!direction; 1949 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 1950 dwc->eps[epnum] = dep; 1951 1952 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1953 (epnum & 1) ? "in" : "out"); 1954 1955 dep->endpoint.name = dep->name; 1956 1957 if (!(dep->number > 1)) { 1958 dep->endpoint.desc = &dwc3_gadget_ep0_desc; 1959 dep->endpoint.comp_desc = NULL; 1960 } 1961 1962 spin_lock_init(&dep->lock); 1963 1964 if (epnum == 0 || epnum == 1) { 1965 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1966 dep->endpoint.maxburst = 1; 1967 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1968 if (!epnum) 1969 dwc->gadget.ep0 = &dep->endpoint; 1970 } else if (direction) { 1971 int mdwidth; 1972 int size; 1973 int ret; 1974 int num; 1975 1976 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 1977 /* MDWIDTH is represented in bits, we need it in bytes */ 1978 mdwidth /= 8; 1979 1980 size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(i)); 1981 size = DWC3_GTXFIFOSIZ_TXFDEF(size); 1982 1983 /* FIFO Depth is in MDWDITH bytes. Multiply */ 1984 size *= mdwidth; 1985 1986 num = size / 1024; 1987 if (num == 0) 1988 num = 1; 1989 1990 /* 1991 * FIFO sizes account an extra MDWIDTH * (num + 1) bytes for 1992 * internal overhead. We don't really know how these are used, 1993 * but documentation say it exists. 
1994 */ 1995 size -= mdwidth * (num + 1); 1996 size /= num; 1997 1998 usb_ep_set_maxpacket_limit(&dep->endpoint, size); 1999 2000 dep->endpoint.max_streams = 15; 2001 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2002 list_add_tail(&dep->endpoint.ep_list, 2003 &dwc->gadget.ep_list); 2004 2005 ret = dwc3_alloc_trb_pool(dep); 2006 if (ret) 2007 return ret; 2008 } else { 2009 int ret; 2010 2011 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 2012 dep->endpoint.max_streams = 15; 2013 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2014 list_add_tail(&dep->endpoint.ep_list, 2015 &dwc->gadget.ep_list); 2016 2017 ret = dwc3_alloc_trb_pool(dep); 2018 if (ret) 2019 return ret; 2020 } 2021 2022 if (epnum == 0 || epnum == 1) { 2023 dep->endpoint.caps.type_control = true; 2024 } else { 2025 dep->endpoint.caps.type_iso = true; 2026 dep->endpoint.caps.type_bulk = true; 2027 dep->endpoint.caps.type_int = true; 2028 } 2029 2030 dep->endpoint.caps.dir_in = !!direction; 2031 dep->endpoint.caps.dir_out = !direction; 2032 2033 INIT_LIST_HEAD(&dep->pending_list); 2034 INIT_LIST_HEAD(&dep->started_list); 2035 } 2036 2037 return 0; 2038 } 2039 2040 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 2041 { 2042 int ret; 2043 2044 INIT_LIST_HEAD(&dwc->gadget.ep_list); 2045 2046 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 2047 if (ret < 0) { 2048 dev_err(dwc->dev, "failed to initialize OUT endpoints\n"); 2049 return ret; 2050 } 2051 2052 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 2053 if (ret < 0) { 2054 dev_err(dwc->dev, "failed to initialize IN endpoints\n"); 2055 return ret; 2056 } 2057 2058 return 0; 2059 } 2060 2061 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 2062 { 2063 struct dwc3_ep *dep; 2064 u8 epnum; 2065 2066 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2067 dep = dwc->eps[epnum]; 2068 if (!dep) 2069 continue; 2070 /* 2071 * Physical endpoints 0 and 1 are special; they form the 2072 * bi-directional USB endpoint 0. 2073 * 2074 * For those two physical endpoints, we don't allocate a TRB 2075 * pool nor do we add them the endpoints list. Due to that, we 2076 * shouldn't do these two operations otherwise we would end up 2077 * with all sorts of bugs when removing dwc3.ko. 2078 */ 2079 if (epnum != 0 && epnum != 1) { 2080 dwc3_free_trb_pool(dep); 2081 list_del(&dep->endpoint.ep_list); 2082 } 2083 2084 kfree(dep); 2085 } 2086 } 2087 2088 /* -------------------------------------------------------------------------- */ 2089 2090 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 2091 struct dwc3_request *req, struct dwc3_trb *trb, 2092 const struct dwc3_event_depevt *event, int status, 2093 int chain) 2094 { 2095 unsigned int count; 2096 unsigned int s_pkt = 0; 2097 unsigned int trb_status; 2098 2099 dwc3_ep_inc_deq(dep); 2100 2101 if (req->trb == trb) 2102 dep->queued_requests--; 2103 2104 trace_dwc3_complete_trb(dep, trb); 2105 2106 /* 2107 * If we're in the middle of series of chained TRBs and we 2108 * receive a short transfer along the way, DWC3 will skip 2109 * through all TRBs including the last TRB in the chain (the 2110 * where CHN bit is zero. DWC3 will also avoid clearing HWO 2111 * bit and SW has to do it manually. 2112 * 2113 * We're going to do that here to avoid problems of HW trying 2114 * to use bogus TRBs for transfers. 
2115 */ 2116 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) 2117 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2118 2119 /* 2120 * If we're dealing with unaligned size OUT transfer, we will be left 2121 * with one TRB pending in the ring. We need to manually clear HWO bit 2122 * from that TRB. 2123 */ 2124 if (req->unaligned && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { 2125 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2126 return 1; 2127 } 2128 2129 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2130 return 1; 2131 2132 count = trb->size & DWC3_TRB_SIZE_MASK; 2133 req->remaining += count; 2134 2135 if (dep->direction) { 2136 if (count) { 2137 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 2138 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 2139 /* 2140 * If missed isoc occurred and there is 2141 * no request queued then issue END 2142 * TRANSFER, so that core generates 2143 * next xfernotready and we will issue 2144 * a fresh START TRANSFER. 2145 * If there are still queued request 2146 * then wait, do not issue either END 2147 * or UPDATE TRANSFER, just attach next 2148 * request in pending_list during 2149 * giveback.If any future queued request 2150 * is successfully transferred then we 2151 * will issue UPDATE TRANSFER for all 2152 * request in the pending_list. 2153 */ 2154 dep->flags |= DWC3_EP_MISSED_ISOC; 2155 } else { 2156 dev_err(dwc->dev, "incomplete IN transfer %s\n", 2157 dep->name); 2158 status = -ECONNRESET; 2159 } 2160 } else { 2161 dep->flags &= ~DWC3_EP_MISSED_ISOC; 2162 } 2163 } else { 2164 if (count && (event->status & DEPEVT_STATUS_SHORT)) 2165 s_pkt = 1; 2166 } 2167 2168 if (s_pkt && !chain) 2169 return 1; 2170 2171 if ((event->status & DEPEVT_STATUS_IOC) && 2172 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2173 return 1; 2174 2175 return 0; 2176 } 2177 2178 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 2179 const struct dwc3_event_depevt *event, int status) 2180 { 2181 struct dwc3_request *req, *n; 2182 struct dwc3_trb *trb; 2183 bool ioc = false; 2184 int ret = 0; 2185 2186 list_for_each_entry_safe(req, n, &dep->started_list, list) { 2187 unsigned length; 2188 int chain; 2189 2190 length = req->request.length; 2191 chain = req->num_pending_sgs > 0; 2192 if (chain) { 2193 struct scatterlist *sg = req->sg; 2194 struct scatterlist *s; 2195 unsigned int pending = req->num_pending_sgs; 2196 unsigned int i; 2197 2198 for_each_sg(sg, s, pending, i) { 2199 trb = &dep->trb_pool[dep->trb_dequeue]; 2200 2201 if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2202 break; 2203 2204 req->sg = sg_next(s); 2205 req->num_pending_sgs--; 2206 2207 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2208 event, status, chain); 2209 if (ret) 2210 break; 2211 } 2212 } else { 2213 trb = &dep->trb_pool[dep->trb_dequeue]; 2214 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2215 event, status, chain); 2216 } 2217 2218 if (req->unaligned) { 2219 trb = &dep->trb_pool[dep->trb_dequeue]; 2220 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2221 event, status, false); 2222 req->unaligned = false; 2223 } 2224 2225 req->request.actual = length - req->remaining; 2226 2227 if ((req->request.actual < length) && req->num_pending_sgs) 2228 return __dwc3_gadget_kick_transfer(dep, 0); 2229 2230 dwc3_gadget_giveback(dep, req, status); 2231 2232 if (ret) { 2233 if ((event->status & DEPEVT_STATUS_IOC) && 2234 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2235 ioc = true; 2236 break; 2237 } 2238 } 2239 2240 /* 2241 * Our endpoint might get disabled by another thread during 2242 * dwc3_gadget_giveback(). 
If that happens, we're just gonna return 1 2243 * early on so DWC3_EP_BUSY flag gets cleared 2244 */ 2245 if (!dep->endpoint.desc) 2246 return 1; 2247 2248 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 2249 list_empty(&dep->started_list)) { 2250 if (list_empty(&dep->pending_list)) { 2251 /* 2252 * If there is no entry in request list then do 2253 * not issue END TRANSFER now. Just set PENDING 2254 * flag, so that END TRANSFER is issued when an 2255 * entry is added into request list. 2256 */ 2257 dep->flags = DWC3_EP_PENDING_REQUEST; 2258 } else { 2259 dwc3_stop_active_transfer(dwc, dep->number, true); 2260 dep->flags = DWC3_EP_ENABLED; 2261 } 2262 return 1; 2263 } 2264 2265 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc) 2266 return 0; 2267 2268 return 1; 2269 } 2270 2271 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 2272 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 2273 { 2274 unsigned status = 0; 2275 int clean_busy; 2276 u32 is_xfer_complete; 2277 2278 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 2279 2280 if (event->status & DEPEVT_STATUS_BUSERR) 2281 status = -ECONNRESET; 2282 2283 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 2284 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete || 2285 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 2286 dep->flags &= ~DWC3_EP_BUSY; 2287 2288 /* 2289 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2290 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 2291 */ 2292 if (dwc->revision < DWC3_REVISION_183A) { 2293 u32 reg; 2294 int i; 2295 2296 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2297 dep = dwc->eps[i]; 2298 2299 if (!(dep->flags & DWC3_EP_ENABLED)) 2300 continue; 2301 2302 if (!list_empty(&dep->started_list)) 2303 return; 2304 } 2305 2306 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2307 reg |= dwc->u1u2; 2308 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2309 2310 dwc->u1u2 = 0; 2311 } 2312 2313 /* 2314 * Our endpoint might get disabled by another thread during 2315 * dwc3_gadget_giveback(). 
If that happens, we're just gonna return 1 2316 * early on so DWC3_EP_BUSY flag gets cleared 2317 */ 2318 if (!dep->endpoint.desc) 2319 return; 2320 2321 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2322 int ret; 2323 2324 ret = __dwc3_gadget_kick_transfer(dep, 0); 2325 if (!ret || ret == -EBUSY) 2326 return; 2327 } 2328 } 2329 2330 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2331 const struct dwc3_event_depevt *event) 2332 { 2333 struct dwc3_ep *dep; 2334 u8 epnum = event->endpoint_number; 2335 u8 cmd; 2336 2337 dep = dwc->eps[epnum]; 2338 2339 if (!(dep->flags & DWC3_EP_ENABLED)) { 2340 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 2341 return; 2342 2343 /* Handle only EPCMDCMPLT when EP disabled */ 2344 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) 2345 return; 2346 } 2347 2348 if (epnum == 0 || epnum == 1) { 2349 dwc3_ep0_interrupt(dwc, event); 2350 return; 2351 } 2352 2353 switch (event->endpoint_event) { 2354 case DWC3_DEPEVT_XFERCOMPLETE: 2355 dep->resource_index = 0; 2356 2357 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2358 dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); 2359 return; 2360 } 2361 2362 dwc3_endpoint_transfer_complete(dwc, dep, event); 2363 break; 2364 case DWC3_DEPEVT_XFERINPROGRESS: 2365 dwc3_endpoint_transfer_complete(dwc, dep, event); 2366 break; 2367 case DWC3_DEPEVT_XFERNOTREADY: 2368 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2369 dwc3_gadget_start_isoc(dwc, dep, event); 2370 } else { 2371 int ret; 2372 2373 ret = __dwc3_gadget_kick_transfer(dep, 0); 2374 if (!ret || ret == -EBUSY) 2375 return; 2376 } 2377 2378 break; 2379 case DWC3_DEPEVT_STREAMEVT: 2380 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2381 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2382 dep->name); 2383 return; 2384 } 2385 break; 2386 case DWC3_DEPEVT_EPCMDCMPLT: 2387 cmd = DEPEVT_PARAMETER_CMD(event->parameters); 2388 2389 if (cmd == DWC3_DEPCMD_ENDTRANSFER) { 2390 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 2391 wake_up(&dep->wait_end_transfer); 2392 } 2393 break; 2394 case DWC3_DEPEVT_RXTXFIFOEVT: 2395 break; 2396 } 2397 } 2398 2399 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2400 { 2401 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2402 spin_unlock(&dwc->lock); 2403 dwc->gadget_driver->disconnect(&dwc->gadget); 2404 spin_lock(&dwc->lock); 2405 } 2406 } 2407 2408 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2409 { 2410 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2411 spin_unlock(&dwc->lock); 2412 dwc->gadget_driver->suspend(&dwc->gadget); 2413 spin_lock(&dwc->lock); 2414 } 2415 } 2416 2417 static void dwc3_resume_gadget(struct dwc3 *dwc) 2418 { 2419 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2420 spin_unlock(&dwc->lock); 2421 dwc->gadget_driver->resume(&dwc->gadget); 2422 spin_lock(&dwc->lock); 2423 } 2424 } 2425 2426 static void dwc3_reset_gadget(struct dwc3 *dwc) 2427 { 2428 if (!dwc->gadget_driver) 2429 return; 2430 2431 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2432 spin_unlock(&dwc->lock); 2433 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2434 spin_lock(&dwc->lock); 2435 } 2436 } 2437 2438 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2439 { 2440 struct dwc3_ep *dep; 2441 struct dwc3_gadget_ep_cmd_params params; 2442 u32 cmd; 2443 int ret; 2444 2445 dep = dwc->eps[epnum]; 2446 2447 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || 2448 !dep->resource_index) 2449 return; 2450 2451 /* 2452 * NOTICE: We are violating 
what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is:
	 *
	 * - Issue EndTransfer WITH CMDIOC bit set
	 * - Wait 100us
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing an EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function and we don't need to delay for
	 * 100us.
	 *
	 * This mode is NOT available on the DWC_usb31 IP.
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;

	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
		udelay(100);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
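	 *
	 * (That special handling is essentially what
	 * dwc3_gadget_disconnect_interrupt() above does: call the gadget
	 * driver's ->disconnect() callback, drop the negotiated speed back
	 * to USB_SPEED_UNKNOWN and move the device to
	 * USB_STATE_NOTATTACHED.)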
2560 * 2561 * Our suggested workaround is to follow the Disconnect 2562 * Event steps here, instead, based on a setup_packet_pending 2563 * flag. Such flag gets set whenever we have a SETUP_PENDING 2564 * status for EP0 TRBs and gets cleared on XferComplete for the 2565 * same endpoint. 2566 * 2567 * Refers to: 2568 * 2569 * STAR#9000466709: RTL: Device : Disconnect event not 2570 * generated if setup packet pending in FIFO 2571 */ 2572 if (dwc->revision < DWC3_REVISION_188A) { 2573 if (dwc->setup_packet_pending) 2574 dwc3_gadget_disconnect_interrupt(dwc); 2575 } 2576 2577 dwc3_reset_gadget(dwc); 2578 2579 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2580 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2581 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2582 dwc->test_mode = false; 2583 dwc3_clear_stall_all_ep(dwc); 2584 2585 /* Reset device address to zero */ 2586 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2587 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2588 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2589 } 2590 2591 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2592 { 2593 struct dwc3_ep *dep; 2594 int ret; 2595 u32 reg; 2596 u8 speed; 2597 2598 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2599 speed = reg & DWC3_DSTS_CONNECTSPD; 2600 dwc->speed = speed; 2601 2602 /* 2603 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2604 * each time on Connect Done. 2605 * 2606 * Currently we always use the reset value. If any platform 2607 * wants to set this to a different value, we need to add a 2608 * setting and update GCTL.RAMCLKSEL here. 2609 */ 2610 2611 switch (speed) { 2612 case DWC3_DSTS_SUPERSPEED_PLUS: 2613 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2614 dwc->gadget.ep0->maxpacket = 512; 2615 dwc->gadget.speed = USB_SPEED_SUPER_PLUS; 2616 break; 2617 case DWC3_DSTS_SUPERSPEED: 2618 /* 2619 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2620 * would cause a missing USB3 Reset event. 2621 * 2622 * In such situations, we should force a USB3 Reset 2623 * event by calling our dwc3_gadget_reset_interrupt() 2624 * routine. 
2625 * 2626 * Refers to: 2627 * 2628 * STAR#9000483510: RTL: SS : USB3 reset event may 2629 * not be generated always when the link enters poll 2630 */ 2631 if (dwc->revision < DWC3_REVISION_190A) 2632 dwc3_gadget_reset_interrupt(dwc); 2633 2634 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2635 dwc->gadget.ep0->maxpacket = 512; 2636 dwc->gadget.speed = USB_SPEED_SUPER; 2637 break; 2638 case DWC3_DSTS_HIGHSPEED: 2639 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2640 dwc->gadget.ep0->maxpacket = 64; 2641 dwc->gadget.speed = USB_SPEED_HIGH; 2642 break; 2643 case DWC3_DSTS_FULLSPEED: 2644 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2645 dwc->gadget.ep0->maxpacket = 64; 2646 dwc->gadget.speed = USB_SPEED_FULL; 2647 break; 2648 case DWC3_DSTS_LOWSPEED: 2649 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2650 dwc->gadget.ep0->maxpacket = 8; 2651 dwc->gadget.speed = USB_SPEED_LOW; 2652 break; 2653 } 2654 2655 /* Enable USB2 LPM Capability */ 2656 2657 if ((dwc->revision > DWC3_REVISION_194A) && 2658 (speed != DWC3_DSTS_SUPERSPEED) && 2659 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) { 2660 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2661 reg |= DWC3_DCFG_LPM_CAP; 2662 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2663 2664 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2665 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2666 2667 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2668 2669 /* 2670 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2671 * DCFG.LPMCap is set, core responses with an ACK and the 2672 * BESL value in the LPM token is less than or equal to LPM 2673 * NYET threshold. 2674 */ 2675 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2676 && dwc->has_lpm_erratum, 2677 "LPM Erratum not available on dwc3 revisions < 2.40a\n"); 2678 2679 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2680 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2681 2682 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2683 } else { 2684 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2685 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2686 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2687 } 2688 2689 dep = dwc->eps[0]; 2690 ret = __dwc3_gadget_ep_enable(dep, true, false); 2691 if (ret) { 2692 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2693 return; 2694 } 2695 2696 dep = dwc->eps[1]; 2697 ret = __dwc3_gadget_ep_enable(dep, true, false); 2698 if (ret) { 2699 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2700 return; 2701 } 2702 2703 /* 2704 * Configure PHY via GUSB3PIPECTLn if required. 2705 * 2706 * Update GTXFIFOSIZn 2707 * 2708 * In both cases reset values should be sufficient. 2709 */ 2710 } 2711 2712 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2713 { 2714 /* 2715 * TODO take core out of low power mode when that's 2716 * implemented. 2717 */ 2718 2719 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2720 spin_unlock(&dwc->lock); 2721 dwc->gadget_driver->resume(&dwc->gadget); 2722 spin_lock(&dwc->lock); 2723 } 2724 } 2725 2726 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2727 unsigned int evtinfo) 2728 { 2729 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2730 unsigned int pwropt; 2731 2732 /* 2733 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2734 * Hibernation mode enabled which would show up when device detects 2735 * host-initiated U3 exit. 
2736 * 2737 * In that case, device will generate a Link State Change Interrupt 2738 * from U3 to RESUME which is only necessary if Hibernation is 2739 * configured in. 2740 * 2741 * There are no functional changes due to such spurious event and we 2742 * just need to ignore it. 2743 * 2744 * Refers to: 2745 * 2746 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation 2747 * operational mode 2748 */ 2749 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 2750 if ((dwc->revision < DWC3_REVISION_250A) && 2751 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { 2752 if ((dwc->link_state == DWC3_LINK_STATE_U3) && 2753 (next == DWC3_LINK_STATE_RESUME)) { 2754 return; 2755 } 2756 } 2757 2758 /* 2759 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 2760 * on the link partner, the USB session might do multiple entry/exit 2761 * of low power states before a transfer takes place. 2762 * 2763 * Due to this problem, we might experience lower throughput. The 2764 * suggested workaround is to disable DCTL[12:9] bits if we're 2765 * transitioning from U1/U2 to U0 and enable those bits again 2766 * after a transfer completes and there are no pending transfers 2767 * on any of the enabled endpoints. 2768 * 2769 * This is the first half of that workaround. 2770 * 2771 * Refers to: 2772 * 2773 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 2774 * core send LGO_Ux entering U0 2775 */ 2776 if (dwc->revision < DWC3_REVISION_183A) { 2777 if (next == DWC3_LINK_STATE_U0) { 2778 u32 u1u2; 2779 u32 reg; 2780 2781 switch (dwc->link_state) { 2782 case DWC3_LINK_STATE_U1: 2783 case DWC3_LINK_STATE_U2: 2784 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2785 u1u2 = reg & (DWC3_DCTL_INITU2ENA 2786 | DWC3_DCTL_ACCEPTU2ENA 2787 | DWC3_DCTL_INITU1ENA 2788 | DWC3_DCTL_ACCEPTU1ENA); 2789 2790 if (!dwc->u1u2) 2791 dwc->u1u2 = reg & u1u2; 2792 2793 reg &= ~u1u2; 2794 2795 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2796 break; 2797 default: 2798 /* do nothing */ 2799 break; 2800 } 2801 } 2802 } 2803 2804 switch (next) { 2805 case DWC3_LINK_STATE_U1: 2806 if (dwc->speed == USB_SPEED_SUPER) 2807 dwc3_suspend_gadget(dwc); 2808 break; 2809 case DWC3_LINK_STATE_U2: 2810 case DWC3_LINK_STATE_U3: 2811 dwc3_suspend_gadget(dwc); 2812 break; 2813 case DWC3_LINK_STATE_RESUME: 2814 dwc3_resume_gadget(dwc); 2815 break; 2816 default: 2817 /* do nothing */ 2818 break; 2819 } 2820 2821 dwc->link_state = next; 2822 } 2823 2824 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, 2825 unsigned int evtinfo) 2826 { 2827 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2828 2829 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) 2830 dwc3_suspend_gadget(dwc); 2831 2832 dwc->link_state = next; 2833 } 2834 2835 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 2836 unsigned int evtinfo) 2837 { 2838 unsigned int is_ss = evtinfo & BIT(4); 2839 2840 /** 2841 * WORKAROUND: DWC3 revison 2.20a with hibernation support 2842 * have a known issue which can cause USB CV TD.9.23 to fail 2843 * randomly. 2844 * 2845 * Because of this issue, core could generate bogus hibernation 2846 * events which SW needs to ignore. 
2847 * 2848 * Refers to: 2849 * 2850 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2851 * Device Fallback from SuperSpeed 2852 */ 2853 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2854 return; 2855 2856 /* enter hibernation here */ 2857 } 2858 2859 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2860 const struct dwc3_event_devt *event) 2861 { 2862 switch (event->type) { 2863 case DWC3_DEVICE_EVENT_DISCONNECT: 2864 dwc3_gadget_disconnect_interrupt(dwc); 2865 break; 2866 case DWC3_DEVICE_EVENT_RESET: 2867 dwc3_gadget_reset_interrupt(dwc); 2868 break; 2869 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2870 dwc3_gadget_conndone_interrupt(dwc); 2871 break; 2872 case DWC3_DEVICE_EVENT_WAKEUP: 2873 dwc3_gadget_wakeup_interrupt(dwc); 2874 break; 2875 case DWC3_DEVICE_EVENT_HIBER_REQ: 2876 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 2877 "unexpected hibernation event\n")) 2878 break; 2879 2880 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 2881 break; 2882 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2883 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2884 break; 2885 case DWC3_DEVICE_EVENT_EOPF: 2886 /* It changed to be suspend event for version 2.30a and above */ 2887 if (dwc->revision >= DWC3_REVISION_230A) { 2888 /* 2889 * Ignore suspend event until the gadget enters into 2890 * USB_STATE_CONFIGURED state. 2891 */ 2892 if (dwc->gadget.state >= USB_STATE_CONFIGURED) 2893 dwc3_gadget_suspend_interrupt(dwc, 2894 event->event_info); 2895 } 2896 break; 2897 case DWC3_DEVICE_EVENT_SOF: 2898 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2899 case DWC3_DEVICE_EVENT_CMD_CMPL: 2900 case DWC3_DEVICE_EVENT_OVERFLOW: 2901 break; 2902 default: 2903 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2904 } 2905 } 2906 2907 static void dwc3_process_event_entry(struct dwc3 *dwc, 2908 const union dwc3_event *event) 2909 { 2910 trace_dwc3_event(event->raw, dwc); 2911 2912 /* Endpoint IRQ, handle it and return early */ 2913 if (event->type.is_devspec == 0) { 2914 /* depevt */ 2915 return dwc3_endpoint_interrupt(dwc, &event->depevt); 2916 } 2917 2918 switch (event->type.type) { 2919 case DWC3_EVENT_TYPE_DEV: 2920 dwc3_gadget_interrupt(dwc, &event->devt); 2921 break; 2922 /* REVISIT what to do with Carkit and I2C events ? */ 2923 default: 2924 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2925 } 2926 } 2927 2928 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) 2929 { 2930 struct dwc3 *dwc = evt->dwc; 2931 irqreturn_t ret = IRQ_NONE; 2932 int left; 2933 u32 reg; 2934 2935 left = evt->count; 2936 2937 if (!(evt->flags & DWC3_EVENT_PENDING)) 2938 return IRQ_NONE; 2939 2940 while (left > 0) { 2941 union dwc3_event event; 2942 2943 event.raw = *(u32 *) (evt->cache + evt->lpos); 2944 2945 dwc3_process_event_entry(dwc, &event); 2946 2947 /* 2948 * FIXME we wrap around correctly to the next entry as 2949 * almost all entries are 4 bytes in size. There is one 2950 * entry which has 12 bytes which is a regular entry 2951 * followed by 8 bytes data. ATM I don't know how 2952 * things are organized if we get next to the a 2953 * boundary so I worry about that once we try to handle 2954 * that. 
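		 *
		 * For now every entry we consume here is 4 bytes, so the
		 * position below just advances by 4 and wraps at evt->length
		 * (e.g., assuming a 4096 byte event buffer, an lpos of 4092
		 * wraps back to 0 on the next entry).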
2955 */ 2956 evt->lpos = (evt->lpos + 4) % evt->length; 2957 left -= 4; 2958 } 2959 2960 evt->count = 0; 2961 evt->flags &= ~DWC3_EVENT_PENDING; 2962 ret = IRQ_HANDLED; 2963 2964 /* Unmask interrupt */ 2965 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 2966 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2967 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 2968 2969 if (dwc->imod_interval) { 2970 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 2971 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 2972 } 2973 2974 return ret; 2975 } 2976 2977 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) 2978 { 2979 struct dwc3_event_buffer *evt = _evt; 2980 struct dwc3 *dwc = evt->dwc; 2981 unsigned long flags; 2982 irqreturn_t ret = IRQ_NONE; 2983 2984 spin_lock_irqsave(&dwc->lock, flags); 2985 ret = dwc3_process_event_buf(evt); 2986 spin_unlock_irqrestore(&dwc->lock, flags); 2987 2988 return ret; 2989 } 2990 2991 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) 2992 { 2993 struct dwc3 *dwc = evt->dwc; 2994 u32 amount; 2995 u32 count; 2996 u32 reg; 2997 2998 if (pm_runtime_suspended(dwc->dev)) { 2999 pm_runtime_get(dwc->dev); 3000 disable_irq_nosync(dwc->irq_gadget); 3001 dwc->pending_events = true; 3002 return IRQ_HANDLED; 3003 } 3004 3005 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3006 count &= DWC3_GEVNTCOUNT_MASK; 3007 if (!count) 3008 return IRQ_NONE; 3009 3010 evt->count = count; 3011 evt->flags |= DWC3_EVENT_PENDING; 3012 3013 /* Mask interrupt */ 3014 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3015 reg |= DWC3_GEVNTSIZ_INTMASK; 3016 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3017 3018 amount = min(count, evt->length - evt->lpos); 3019 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); 3020 3021 if (amount < count) 3022 memcpy(evt->cache, evt->buf, count - amount); 3023 3024 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); 3025 3026 return IRQ_WAKE_THREAD; 3027 } 3028 3029 static irqreturn_t dwc3_interrupt(int irq, void *_evt) 3030 { 3031 struct dwc3_event_buffer *evt = _evt; 3032 3033 return dwc3_check_event_buf(evt); 3034 } 3035 3036 static int dwc3_gadget_get_irq(struct dwc3 *dwc) 3037 { 3038 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 3039 int irq; 3040 3041 irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); 3042 if (irq > 0) 3043 goto out; 3044 3045 if (irq == -EPROBE_DEFER) 3046 goto out; 3047 3048 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); 3049 if (irq > 0) 3050 goto out; 3051 3052 if (irq == -EPROBE_DEFER) 3053 goto out; 3054 3055 irq = platform_get_irq(dwc3_pdev, 0); 3056 if (irq > 0) 3057 goto out; 3058 3059 if (irq != -EPROBE_DEFER) 3060 dev_err(dwc->dev, "missing peripheral IRQ\n"); 3061 3062 if (!irq) 3063 irq = -EINVAL; 3064 3065 out: 3066 return irq; 3067 } 3068 3069 /** 3070 * dwc3_gadget_init - Initializes gadget related registers 3071 * @dwc: pointer to our controller context structure 3072 * 3073 * Returns 0 on success otherwise negative errno. 
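 *
 * Allocates the control request, EP0 TRBs, the bounce buffers and the
 * per-endpoint bookkeeping, then registers the UDC. On failure everything
 * allocated so far is released again through the unwind labels below.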
3074 */ 3075 int dwc3_gadget_init(struct dwc3 *dwc) 3076 { 3077 int ret; 3078 int irq; 3079 3080 irq = dwc3_gadget_get_irq(dwc); 3081 if (irq < 0) { 3082 ret = irq; 3083 goto err0; 3084 } 3085 3086 dwc->irq_gadget = irq; 3087 3088 dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), 3089 &dwc->ctrl_req_addr, GFP_KERNEL); 3090 if (!dwc->ctrl_req) { 3091 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 3092 ret = -ENOMEM; 3093 goto err0; 3094 } 3095 3096 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 3097 sizeof(*dwc->ep0_trb) * 2, 3098 &dwc->ep0_trb_addr, GFP_KERNEL); 3099 if (!dwc->ep0_trb) { 3100 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 3101 ret = -ENOMEM; 3102 goto err1; 3103 } 3104 3105 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 3106 if (!dwc->setup_buf) { 3107 ret = -ENOMEM; 3108 goto err2; 3109 } 3110 3111 dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev, 3112 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 3113 GFP_KERNEL); 3114 if (!dwc->ep0_bounce) { 3115 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 3116 ret = -ENOMEM; 3117 goto err3; 3118 } 3119 3120 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL); 3121 if (!dwc->zlp_buf) { 3122 ret = -ENOMEM; 3123 goto err4; 3124 } 3125 3126 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3127 &dwc->bounce_addr, GFP_KERNEL); 3128 if (!dwc->bounce) { 3129 ret = -ENOMEM; 3130 goto err5; 3131 } 3132 3133 init_completion(&dwc->ep0_in_setup); 3134 3135 dwc->gadget.ops = &dwc3_gadget_ops; 3136 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3137 dwc->gadget.sg_supported = true; 3138 dwc->gadget.name = "dwc3-gadget"; 3139 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; 3140 3141 /* 3142 * FIXME We might be setting max_speed to <SUPER, however versions 3143 * <2.20a of dwc3 have an issue with metastability (documented 3144 * elsewhere in this driver) which tells us we can't set max speed to 3145 * anything lower than SUPER. 3146 * 3147 * Because gadget.max_speed is only used by composite.c and function 3148 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3149 * to happen so we avoid sending SuperSpeed Capability descriptor 3150 * together with our BOS descriptor as that could confuse host into 3151 * thinking we can handle super speed. 3152 * 3153 * Note that, in fact, we won't even support GetBOS requests when speed 3154 * is less than super speed because we don't have means, yet, to tell 3155 * composite.c that we are USB 2.0 + LPM ECN. 3156 */ 3157 if (dwc->revision < DWC3_REVISION_220A) 3158 dev_info(dwc->dev, "changing max_speed on rev %08x\n", 3159 dwc->revision); 3160 3161 dwc->gadget.max_speed = dwc->maximum_speed; 3162 3163 /* 3164 * REVISIT: Here we should clear all pending IRQs to be 3165 * sure we're starting from a well known location. 
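	 *
	 * (One way to do that, sketched as an untested idea rather than
	 * something this driver currently implements: read
	 * DWC3_GEVNTCOUNT(0) and write the value back, the same
	 * acknowledge sequence dwc3_check_event_buf() uses, so that any
	 * stale events are discarded before the UDC is registered.)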
3166 */ 3167 3168 ret = dwc3_gadget_init_endpoints(dwc); 3169 if (ret) 3170 goto err6; 3171 3172 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3173 if (ret) { 3174 dev_err(dwc->dev, "failed to register udc\n"); 3175 goto err6; 3176 } 3177 3178 return 0; 3179 err6: 3180 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3181 dwc->bounce_addr); 3182 3183 err5: 3184 kfree(dwc->zlp_buf); 3185 3186 err4: 3187 dwc3_gadget_free_endpoints(dwc); 3188 dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, 3189 dwc->ep0_bounce, dwc->ep0_bounce_addr); 3190 3191 err3: 3192 kfree(dwc->setup_buf); 3193 3194 err2: 3195 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3196 dwc->ep0_trb, dwc->ep0_trb_addr); 3197 3198 err1: 3199 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), 3200 dwc->ctrl_req, dwc->ctrl_req_addr); 3201 3202 err0: 3203 return ret; 3204 } 3205 3206 /* -------------------------------------------------------------------------- */ 3207 3208 void dwc3_gadget_exit(struct dwc3 *dwc) 3209 { 3210 usb_del_gadget_udc(&dwc->gadget); 3211 3212 dwc3_gadget_free_endpoints(dwc); 3213 3214 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3215 dwc->bounce_addr); 3216 dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, 3217 dwc->ep0_bounce, dwc->ep0_bounce_addr); 3218 3219 kfree(dwc->setup_buf); 3220 kfree(dwc->zlp_buf); 3221 3222 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3223 dwc->ep0_trb, dwc->ep0_trb_addr); 3224 3225 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), 3226 dwc->ctrl_req, dwc->ctrl_req_addr); 3227 } 3228 3229 int dwc3_gadget_suspend(struct dwc3 *dwc) 3230 { 3231 int ret; 3232 3233 if (!dwc->gadget_driver) 3234 return 0; 3235 3236 ret = dwc3_gadget_run_stop(dwc, false, false); 3237 if (ret < 0) 3238 return ret; 3239 3240 dwc3_disconnect_gadget(dwc); 3241 __dwc3_gadget_stop(dwc); 3242 3243 return 0; 3244 } 3245 3246 int dwc3_gadget_resume(struct dwc3 *dwc) 3247 { 3248 int ret; 3249 3250 if (!dwc->gadget_driver) 3251 return 0; 3252 3253 ret = __dwc3_gadget_start(dwc); 3254 if (ret < 0) 3255 goto err0; 3256 3257 ret = dwc3_gadget_run_stop(dwc, true, false); 3258 if (ret < 0) 3259 goto err1; 3260 3261 return 0; 3262 3263 err1: 3264 __dwc3_gadget_stop(dwc); 3265 3266 err0: 3267 return ret; 3268 } 3269 3270 void dwc3_gadget_process_pending_events(struct dwc3 *dwc) 3271 { 3272 if (dwc->pending_events) { 3273 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); 3274 dwc->pending_events = false; 3275 enable_irq(dwc->irq_gadget); 3276 } 3277 } 3278
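
/*
 * Note on the runtime PM interaction above (the exact call sites live in
 * the core glue and are not shown here): when an interrupt arrives while
 * the device is runtime suspended, dwc3_check_event_buf() takes a PM
 * reference, disables the IRQ line and records the fact in
 * dwc->pending_events instead of touching the hardware. Once resumed,
 * the core is expected to call dwc3_gadget_process_pending_events(),
 * which replays the interrupt and re-enables the IRQ line.
 */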