/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the current link state (>= 0).
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb() - Increment a TRB index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	req->started = false;
	list_del(&req->list);
	req->trb = NULL;
	req->remaining = 0;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number <= 1)
		dwc->ep0_bounced = false;

	usb_gadget_unmap_request_by_dev(dwc->sysdev,
			&req->request, req->direction);

	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);

	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32 timeout = 500;
	int status = 0;
	int ret = 0;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	u32 timeout = 500;
	u32 reg;

	int cmd_status = 0;
	int susphy = false;
	int ret = -EINVAL;

	/*
	 * Synopsys Databook 2.60a states, in section 6.3.2.5.[1-8], that if
	 * we're issuing an endpoint command, we must check if
	 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
247 * 248 * We will also set SUSPHY bit to what it was before returning as stated 249 * by the same section on Synopsys databook. 250 */ 251 if (dwc->gadget.speed <= USB_SPEED_HIGH) { 252 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 253 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 254 susphy = true; 255 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 256 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 257 } 258 } 259 260 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 261 int needs_wakeup; 262 263 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || 264 dwc->link_state == DWC3_LINK_STATE_U2 || 265 dwc->link_state == DWC3_LINK_STATE_U3); 266 267 if (unlikely(needs_wakeup)) { 268 ret = __dwc3_gadget_wakeup(dwc); 269 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 270 ret); 271 } 272 } 273 274 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 275 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 276 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 277 278 /* 279 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're 280 * not relying on XferNotReady, we can make use of a special "No 281 * Response Update Transfer" command where we should clear both CmdAct 282 * and CmdIOC bits. 283 * 284 * With this, we don't need to wait for command completion and can 285 * straight away issue further commands to the endpoint. 286 * 287 * NOTICE: We're making an assumption that control endpoints will never 288 * make use of Update Transfer command. This is a safe assumption 289 * because we can never have more than one request at a time with 290 * Control Endpoints. If anybody changes that assumption, this chunk 291 * needs to be updated accordingly. 292 */ 293 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && 294 !usb_endpoint_xfer_isoc(desc)) 295 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); 296 else 297 cmd |= DWC3_DEPCMD_CMDACT; 298 299 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); 300 do { 301 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 302 if (!(reg & DWC3_DEPCMD_CMDACT)) { 303 cmd_status = DWC3_DEPCMD_STATUS(reg); 304 305 switch (cmd_status) { 306 case 0: 307 ret = 0; 308 break; 309 case DEPEVT_TRANSFER_NO_RESOURCE: 310 ret = -EINVAL; 311 break; 312 case DEPEVT_TRANSFER_BUS_EXPIRY: 313 /* 314 * SW issues START TRANSFER command to 315 * isochronous ep with future frame interval. If 316 * future interval time has already passed when 317 * core receives the command, it will respond 318 * with an error status of 'Bus Expiry'. 319 * 320 * Instead of always returning -EINVAL, let's 321 * give a hint to the gadget driver that this is 322 * the case by returning -EAGAIN. 
323 */ 324 ret = -EAGAIN; 325 break; 326 default: 327 dev_WARN(dwc->dev, "UNKNOWN cmd status\n"); 328 } 329 330 break; 331 } 332 } while (--timeout); 333 334 if (timeout == 0) { 335 ret = -ETIMEDOUT; 336 cmd_status = -ETIMEDOUT; 337 } 338 339 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); 340 341 if (ret == 0) { 342 switch (DWC3_DEPCMD_CMD(cmd)) { 343 case DWC3_DEPCMD_STARTTRANSFER: 344 dep->flags |= DWC3_EP_TRANSFER_STARTED; 345 break; 346 case DWC3_DEPCMD_ENDTRANSFER: 347 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 348 break; 349 default: 350 /* nothing */ 351 break; 352 } 353 } 354 355 if (unlikely(susphy)) { 356 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 357 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 358 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 359 } 360 361 return ret; 362 } 363 364 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) 365 { 366 struct dwc3 *dwc = dep->dwc; 367 struct dwc3_gadget_ep_cmd_params params; 368 u32 cmd = DWC3_DEPCMD_CLEARSTALL; 369 370 /* 371 * As of core revision 2.60a the recommended programming model 372 * is to set the ClearPendIN bit when issuing a Clear Stall EP 373 * command for IN endpoints. This is to prevent an issue where 374 * some (non-compliant) hosts may not send ACK TPs for pending 375 * IN transfers due to a mishandled error condition. Synopsys 376 * STAR 9000614252. 377 */ 378 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) && 379 (dwc->gadget.speed >= USB_SPEED_SUPER)) 380 cmd |= DWC3_DEPCMD_CLEARPENDIN; 381 382 memset(¶ms, 0, sizeof(params)); 383 384 return dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 385 } 386 387 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 388 struct dwc3_trb *trb) 389 { 390 u32 offset = (char *) trb - (char *) dep->trb_pool; 391 392 return dep->trb_pool_dma + offset; 393 } 394 395 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 396 { 397 struct dwc3 *dwc = dep->dwc; 398 399 if (dep->trb_pool) 400 return 0; 401 402 dep->trb_pool = dma_alloc_coherent(dwc->sysdev, 403 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 404 &dep->trb_pool_dma, GFP_KERNEL); 405 if (!dep->trb_pool) { 406 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 407 dep->name); 408 return -ENOMEM; 409 } 410 411 return 0; 412 } 413 414 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 415 { 416 struct dwc3 *dwc = dep->dwc; 417 418 dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 419 dep->trb_pool, dep->trb_pool_dma); 420 421 dep->trb_pool = NULL; 422 dep->trb_pool_dma = 0; 423 } 424 425 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); 426 427 /** 428 * dwc3_gadget_start_config - Configure EP resources 429 * @dwc: pointer to our controller context structure 430 * @dep: endpoint that is being enabled 431 * 432 * The assignment of transfer resources cannot perfectly follow the 433 * data book due to the fact that the controller driver does not have 434 * all knowledge of the configuration in advance. It is given this 435 * information piecemeal by the composite gadget framework after every 436 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook 437 * programming model in this scenario can cause errors. For two 438 * reasons: 439 * 440 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION 441 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of 442 * multiple interfaces. 443 * 444 * 2) The databook does not mention doing more DEPXFERCFG for new 445 * endpoint on alt setting (8.1.6). 
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this
 * setting will stay persistent until either a core reset or
 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
 * do DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled
 * but is triggered only when called for EP0-out, which always happens
 * first, and which should only happen in one of the above conditions.
 */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int i;
	int ret;

	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dep = dwc->eps[i];

		if (!dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		bool modify, bool restore)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;

	if (dev_WARN_ONCE(dwc->dev, modify && restore,
					"Can't modify and restore\n"))
		return -EINVAL;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	if (modify) {
		params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
	} else if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	} else {
		params.param0 |= DWC3_DEPCFG_ACTION_INIT;
	}

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @modify: true if an already-configured endpoint should be modified in place
 * @restore: true if the endpoint state saved before hibernation should be restored
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		bool modify, bool restore)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;

	u32 reg;
	int ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		init_waitqueue_head(&dep->wait_end_transfer);

		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
	 */
	if (usb_endpoint_xfer_bulk(desc)) {
		struct dwc3_gadget_ep_cmd_params params;
		struct dwc3_trb *trb;
		dma_addr_t trb_dma;
		u32 cmd;

		memset(&params, 0, sizeof(params));
		trb = &dep->trb_pool[0];
		trb_dma = dwc3_trb_dma_offset(dep, trb);

		params.param0 = upper_32_bits(trb_dma);
		params.param1 = lower_32_bits(trb_dma);

		cmd = DWC3_DEPCMD_STARTTRANSFER;

		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
		if (ret < 0)
			return ret;

		dep->flags |= DWC3_EP_BUSY;

		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
		WARN_ON_ONCE(!dep->resource_index);
	}

out:
	trace_dwc3_gadget_ep_enable(dep);

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);

static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	dwc3_stop_active_transfer(dwc, dep->number, true);

	/* giveback all requests to the gadget driver */
	while (!list_empty(&dep->started_list)) {
		req = next_request(&dep->started_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->pending_list)) {
		req = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	trace_dwc3_gadget_ep_disable(dep);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->type = 0;
	dep->flags &= DWC3_EP_END_TRANSFER_PENDING;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum = dep->number;
	req->dep = dep;

	dep->allocated_requests++;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	dep->allocated_requests--;
	trace_dwc3_free_request(req);
	kfree(req);
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @dma: DMA address of the buffer described by this TRB
 * @length: length, in bytes, of the buffer
 * @chain: true if this TRB is chained to the next one
 * @node: index of this TRB within the current request
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned chain, unsigned node)
{
	struct dwc3_trb *trb;
	struct dwc3 *dwc = dep->dwc;
	struct usb_gadget *gadget = &dwc->gadget;
	enum usb_device_speed speed = gadget->speed;

	trb = &dep->trb_pool[dep->trb_enqueue];

	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		dep->queued_requests++;
	}

	dwc3_ep_inc_enq(dep);

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/* always enable Continue on Short Packet */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (req->request.short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	if ((!req->request.no_interrupt && !chain) ||
			(dwc3_calc_trbs_left(dep) == 0))
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}

/**
 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[tmp - 1];
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	struct dwc3_trb *tmp;
	struct dwc3 *dwc = dep->dwc;
	u8 trbs_left;

	/*
	 * If enqueue & dequeue are equal then it is either full or empty.
	 *
	 * One way to know for sure is if the TRB right before us has HWO bit
	 * set or not. If it has, then we're definitely full and can't fit any
	 * more transfers in our ring.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
				  "%s No TRBS left\n", dep->name))
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}

static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	unsigned int length;
	dma_addr_t dma;
	int i;

	for_each_sg(sg, s, req->num_pending_sgs, i) {
		unsigned chain = true;

		length = sg_dma_len(s);
		dma = sg_dma_address(s);

		if (sg_is_last(s))
			chain = false;

		dwc3_prepare_one_trb(dep, req, dma, length,
				chain, i);

		if (!dwc3_calc_trbs_left(dep))
			break;
	}
}

static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int length;
	dma_addr_t dma;

	dma = req->request.dma;
	length = req->request.length;

	dwc3_prepare_one_trb(dep, req, dma, length,
			false, 0);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	if (!dwc3_calc_trbs_left(dep))
		return;

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	int starting;
	int ret;
	u32 cmd;

	starting = !(dep->flags & DWC3_EP_BUSY);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER |
			DWC3_DEPCMD_PARAM(cmd_param);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		if (req->trb)
			memset(req->trb, 0, sizeof(struct dwc3_trb));
		dep->queued_requests--;
		dwc3_gadget_giveback(dep, req, ret);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (starting) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->pending_list)) {
		dev_info(dwc->dev, "%s: ran out of requests\n",
				dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;

	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
				&req->request, req->dep->name)) {
		dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
				dep->name, &req->request, req->dep->name);
		return -EINVAL;
	}

	pm_runtime_get(dwc->dev);

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	trace_dwc3_ep_queue(req);

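	/*
	 * Map the request for DMA against the DMA-capable device (sysdev)
	 * before it is placed on the pending list and handed to the hardware.
	 */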
	ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
					    dep->direction);
	if (ret)
		return ret;

	req->sg = req->request.sg;
	req->num_pending_sgs = req->request.num_mapped_sgs;

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us to issue an EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
			if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			} else {
				u32 cur_uf;

				cur_uf = __dwc3_gadget_get_frame(dwc);
				__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
				dep->flags &= ~DWC3_EP_PENDING_REQUEST;
			}
		}
		return 0;
	}

	if (!dwc3_calc_trbs_left(dep))
		return 0;

	ret = __dwc3_gadget_kick_transfer(dep, 0);
	if (ret == -EBUSY)
		ret = 0;

	return ret;
}

static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
		struct usb_request *request)
{
	dwc3_gadget_ep_free_request(ep, request);
}

static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;
	struct usb_request *request;
	struct usb_ep *ep = &dep->endpoint;

	request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->length = 0;
	request->buf = dwc->zlp_buf;
	request->complete = __dwc3_gadget_ep_zlp_complete;

	req = to_dwc3_request(request);

	return __dwc3_gadget_ep_queue(dep, req);
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);

	/*
	 * Okay, here's the thing, if gadget driver has requested for a ZLP by
	 * setting request->zero, instead of doing magic, we will just queue an
	 * extra usb_request ourselves so that it gets handled the same way as
	 * any other request.
	 */
	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0))
		ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->started_list, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned transfer_in_flight;
		unsigned started;

		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	return __dwc3_gadget_get_frame(dwc);
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int retries;

	int ret;
	u32 reg;

	u8 link_state;
	u8 speed;

	/*
	 * According to the Databook, a Remote Wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if ((speed == DWC3_DSTS_SUPERSPEED) ||
	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
		return 0;

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_wakeup(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32 reg;
	u32 timeout = 500;

	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	is_on = !!is_on;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 */
	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
			return -ETIMEDOUT;
		}
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32 reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	if (dwc->revision < DWC3_REVISION_250A)
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/**
 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
 * @dwc: pointer to our context structure
 *
 * The following looks complex but it's actually very simple. In order to
 * calculate the number of packets we can burst at once on OUT transfers, we're
 * gonna use RxFIFO size.
 *
 * To calculate RxFIFO size we need two numbers:
 * MDWIDTH = size, in bits, of the internal memory bus
 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
 *
 * Given these two numbers, the formula is simple:
 *
 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
 *
 * 24 bytes is for 3x SETUP packets
 * 16 bytes is a clock domain crossing tolerance
 *
 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
 */
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
	u32 ram2_depth;
	u32 mdwidth;
	u32 nump;
	u32 reg;

	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);

	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
	nump = min_t(u32, nump, 16);

	/* update NumP */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_NUMP_MASK;
	reg |= nump << DWC3_DCFG_NUMP_SHIFT;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret = 0;
	u32 reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/*
	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
	 * which would cause a metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
1706 * 1707 * Because of that, we cannot configure the IP to any 1708 * speed other than the SuperSpeed 1709 * 1710 * Refers to: 1711 * 1712 * STAR#9000525659: Clock Domain Crossing on DCTL in 1713 * USB 2.0 Mode 1714 */ 1715 if (dwc->revision < DWC3_REVISION_220A) { 1716 reg |= DWC3_DCFG_SUPERSPEED; 1717 } else { 1718 switch (dwc->maximum_speed) { 1719 case USB_SPEED_LOW: 1720 reg |= DWC3_DCFG_LOWSPEED; 1721 break; 1722 case USB_SPEED_FULL: 1723 reg |= DWC3_DCFG_FULLSPEED; 1724 break; 1725 case USB_SPEED_HIGH: 1726 reg |= DWC3_DCFG_HIGHSPEED; 1727 break; 1728 case USB_SPEED_SUPER_PLUS: 1729 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 1730 break; 1731 default: 1732 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", 1733 dwc->maximum_speed); 1734 /* fall through */ 1735 case USB_SPEED_SUPER: 1736 reg |= DWC3_DCFG_SUPERSPEED; 1737 break; 1738 } 1739 } 1740 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1741 1742 /* 1743 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1744 * field instead of letting dwc3 itself calculate that automatically. 1745 * 1746 * This way, we maximize the chances that we'll be able to get several 1747 * bursts of data without going through any sort of endpoint throttling. 1748 */ 1749 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1750 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1751 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1752 1753 dwc3_gadget_setup_nump(dwc); 1754 1755 /* Start with SuperSpeed Default */ 1756 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1757 1758 dep = dwc->eps[0]; 1759 ret = __dwc3_gadget_ep_enable(dep, false, false); 1760 if (ret) { 1761 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1762 goto err0; 1763 } 1764 1765 dep = dwc->eps[1]; 1766 ret = __dwc3_gadget_ep_enable(dep, false, false); 1767 if (ret) { 1768 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1769 goto err1; 1770 } 1771 1772 /* begin to receive SETUP packets */ 1773 dwc->ep0state = EP0_SETUP_PHASE; 1774 dwc3_ep0_out_start(dwc); 1775 1776 dwc3_gadget_enable_irq(dwc); 1777 1778 return 0; 1779 1780 err1: 1781 __dwc3_gadget_ep_disable(dwc->eps[0]); 1782 1783 err0: 1784 return ret; 1785 } 1786 1787 static int dwc3_gadget_start(struct usb_gadget *g, 1788 struct usb_gadget_driver *driver) 1789 { 1790 struct dwc3 *dwc = gadget_to_dwc(g); 1791 unsigned long flags; 1792 int ret = 0; 1793 int irq; 1794 1795 irq = dwc->irq_gadget; 1796 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1797 IRQF_SHARED, "dwc3", dwc->ev_buf); 1798 if (ret) { 1799 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1800 irq, ret); 1801 goto err0; 1802 } 1803 1804 spin_lock_irqsave(&dwc->lock, flags); 1805 if (dwc->gadget_driver) { 1806 dev_err(dwc->dev, "%s is already bound to %s\n", 1807 dwc->gadget.name, 1808 dwc->gadget_driver->driver.name); 1809 ret = -EBUSY; 1810 goto err1; 1811 } 1812 1813 dwc->gadget_driver = driver; 1814 1815 if (pm_runtime_active(dwc->dev)) 1816 __dwc3_gadget_start(dwc); 1817 1818 spin_unlock_irqrestore(&dwc->lock, flags); 1819 1820 return 0; 1821 1822 err1: 1823 spin_unlock_irqrestore(&dwc->lock, flags); 1824 free_irq(irq, dwc); 1825 1826 err0: 1827 return ret; 1828 } 1829 1830 static void __dwc3_gadget_stop(struct dwc3 *dwc) 1831 { 1832 dwc3_gadget_disable_irq(dwc); 1833 __dwc3_gadget_ep_disable(dwc->eps[0]); 1834 __dwc3_gadget_ep_disable(dwc->eps[1]); 1835 } 1836 1837 static int dwc3_gadget_stop(struct usb_gadget *g) 1838 { 1839 struct dwc3 *dwc = gadget_to_dwc(g); 1840 unsigned long flags; 1841 int epnum; 1842 1843 
	spin_lock_irqsave(&dwc->lock, flags);

	if (pm_runtime_suspended(dwc->dev))
		goto out;

	__dwc3_gadget_stop(dwc);

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep = dwc->eps[epnum];

		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			continue;

		wait_event_lock_irq(dep->wait_end_transfer,
				    !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
				    dwc->lock);
	}

out:
	dwc->gadget_driver = NULL;
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(dwc->irq_gadget, dwc->ev_buf);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
		u8 num, u32 direction)
{
	struct dwc3_ep *dep;
	u8 i;

	for (i = 0; i < num; i++) {
		u8 epnum = (i << 1) | (direction ? 1 : 0);

		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep)
			return -ENOMEM;

		dep->dwc = dwc;
		dep->number = epnum;
		dep->direction = !!direction;
		dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
		dwc->eps[epnum] = dep;

		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");

		dep->endpoint.name = dep->name;

		if (!(dep->number > 1)) {
			dep->endpoint.desc = &dwc3_gadget_ep0_desc;
			dep->endpoint.comp_desc = NULL;
		}

		spin_lock_init(&dep->lock);

		if (epnum == 0 || epnum == 1) {
			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
			dep->endpoint.maxburst = 1;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int ret;

			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		if (epnum == 0 || epnum == 1) {
			dep->endpoint.caps.type_control = true;
		} else {
			dep->endpoint.caps.type_iso = true;
			dep->endpoint.caps.type_bulk = true;
			dep->endpoint.caps.type_int = true;
		}

		dep->endpoint.caps.dir_in = !!direction;
		dep->endpoint.caps.dir_out = !direction;

		INIT_LIST_HEAD(&dep->pending_list);
		INIT_LIST_HEAD(&dep->started_list);
	}

	return 0;
}

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	int ret;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
		return ret;
	}

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to initialize IN endpoints\n");
		return ret;
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
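		/* Undo what dwc3_gadget_init_hw_endpoints() allocated. */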
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list. Due to that,
		 * we shouldn't do these two operations otherwise we would end
		 * up with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status,
		int chain)
{
	unsigned int count;
	unsigned int s_pkt = 0;
	unsigned int trb_status;

	dwc3_ep_inc_deq(dep);

	if (req->trb == trb)
		dep->queued_requests--;

	trace_dwc3_complete_trb(dep, trb);

	/*
	 * If we're in the middle of a series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * one where the CHN bit is zero). DWC3 will also avoid clearing
	 * the HWO bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				/*
				 * If a missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in pending_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the pending_list.
2056 */ 2057 dep->flags |= DWC3_EP_MISSED_ISOC; 2058 } else { 2059 dev_err(dwc->dev, "incomplete IN transfer %s\n", 2060 dep->name); 2061 status = -ECONNRESET; 2062 } 2063 } else { 2064 dep->flags &= ~DWC3_EP_MISSED_ISOC; 2065 } 2066 } else { 2067 if (count && (event->status & DEPEVT_STATUS_SHORT)) 2068 s_pkt = 1; 2069 } 2070 2071 if (s_pkt && !chain) 2072 return 1; 2073 2074 if ((event->status & DEPEVT_STATUS_IOC) && 2075 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2076 return 1; 2077 2078 return 0; 2079 } 2080 2081 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 2082 const struct dwc3_event_depevt *event, int status) 2083 { 2084 struct dwc3_request *req, *n; 2085 struct dwc3_trb *trb; 2086 bool ioc = false; 2087 int ret = 0; 2088 2089 list_for_each_entry_safe(req, n, &dep->started_list, list) { 2090 unsigned length; 2091 int chain; 2092 2093 length = req->request.length; 2094 chain = req->num_pending_sgs > 0; 2095 if (chain) { 2096 struct scatterlist *sg = req->sg; 2097 struct scatterlist *s; 2098 unsigned int pending = req->num_pending_sgs; 2099 unsigned int i; 2100 2101 for_each_sg(sg, s, pending, i) { 2102 trb = &dep->trb_pool[dep->trb_dequeue]; 2103 2104 if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2105 break; 2106 2107 req->sg = sg_next(s); 2108 req->num_pending_sgs--; 2109 2110 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2111 event, status, chain); 2112 if (ret) 2113 break; 2114 } 2115 } else { 2116 trb = &dep->trb_pool[dep->trb_dequeue]; 2117 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2118 event, status, chain); 2119 } 2120 2121 req->request.actual = length - req->remaining; 2122 2123 if ((req->request.actual < length) && req->num_pending_sgs) 2124 return __dwc3_gadget_kick_transfer(dep, 0); 2125 2126 dwc3_gadget_giveback(dep, req, status); 2127 2128 if (ret) { 2129 if ((event->status & DEPEVT_STATUS_IOC) && 2130 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2131 ioc = true; 2132 break; 2133 } 2134 } 2135 2136 /* 2137 * Our endpoint might get disabled by another thread during 2138 * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 2139 * early on so DWC3_EP_BUSY flag gets cleared 2140 */ 2141 if (!dep->endpoint.desc) 2142 return 1; 2143 2144 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 2145 list_empty(&dep->started_list)) { 2146 if (list_empty(&dep->pending_list)) { 2147 /* 2148 * If there is no entry in request list then do 2149 * not issue END TRANSFER now. Just set PENDING 2150 * flag, so that END TRANSFER is issued when an 2151 * entry is added into request list. 2152 */ 2153 dep->flags = DWC3_EP_PENDING_REQUEST; 2154 } else { 2155 dwc3_stop_active_transfer(dwc, dep->number, true); 2156 dep->flags = DWC3_EP_ENABLED; 2157 } 2158 return 1; 2159 } 2160 2161 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc) 2162 return 0; 2163 2164 return 1; 2165 } 2166 2167 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 2168 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 2169 { 2170 unsigned status = 0; 2171 int clean_busy; 2172 u32 is_xfer_complete; 2173 2174 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 2175 2176 if (event->status & DEPEVT_STATUS_BUSERR) 2177 status = -ECONNRESET; 2178 2179 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 2180 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete || 2181 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 2182 dep->flags &= ~DWC3_EP_BUSY; 2183 2184 /* 2185 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 
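	 * The 1st half saves and clears the DCTL U1/U2 INIT/ACCEPT enable
	 * bits on the U1/U2 -> U0 transition; here they are restored once
	 * no enabled endpoint still has a started request.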
2186 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 2187 */ 2188 if (dwc->revision < DWC3_REVISION_183A) { 2189 u32 reg; 2190 int i; 2191 2192 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2193 dep = dwc->eps[i]; 2194 2195 if (!(dep->flags & DWC3_EP_ENABLED)) 2196 continue; 2197 2198 if (!list_empty(&dep->started_list)) 2199 return; 2200 } 2201 2202 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2203 reg |= dwc->u1u2; 2204 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2205 2206 dwc->u1u2 = 0; 2207 } 2208 2209 /* 2210 * Our endpoint might get disabled by another thread during 2211 * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 2212 * early on so DWC3_EP_BUSY flag gets cleared 2213 */ 2214 if (!dep->endpoint.desc) 2215 return; 2216 2217 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2218 int ret; 2219 2220 ret = __dwc3_gadget_kick_transfer(dep, 0); 2221 if (!ret || ret == -EBUSY) 2222 return; 2223 } 2224 } 2225 2226 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2227 const struct dwc3_event_depevt *event) 2228 { 2229 struct dwc3_ep *dep; 2230 u8 epnum = event->endpoint_number; 2231 u8 cmd; 2232 2233 dep = dwc->eps[epnum]; 2234 2235 if (!(dep->flags & DWC3_EP_ENABLED)) { 2236 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 2237 return; 2238 2239 /* Handle only EPCMDCMPLT when EP disabled */ 2240 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) 2241 return; 2242 } 2243 2244 if (epnum == 0 || epnum == 1) { 2245 dwc3_ep0_interrupt(dwc, event); 2246 return; 2247 } 2248 2249 switch (event->endpoint_event) { 2250 case DWC3_DEPEVT_XFERCOMPLETE: 2251 dep->resource_index = 0; 2252 2253 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2254 dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); 2255 return; 2256 } 2257 2258 dwc3_endpoint_transfer_complete(dwc, dep, event); 2259 break; 2260 case DWC3_DEPEVT_XFERINPROGRESS: 2261 dwc3_endpoint_transfer_complete(dwc, dep, event); 2262 break; 2263 case DWC3_DEPEVT_XFERNOTREADY: 2264 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2265 dwc3_gadget_start_isoc(dwc, dep, event); 2266 } else { 2267 int ret; 2268 2269 ret = __dwc3_gadget_kick_transfer(dep, 0); 2270 if (!ret || ret == -EBUSY) 2271 return; 2272 } 2273 2274 break; 2275 case DWC3_DEPEVT_STREAMEVT: 2276 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2277 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2278 dep->name); 2279 return; 2280 } 2281 break; 2282 case DWC3_DEPEVT_EPCMDCMPLT: 2283 cmd = DEPEVT_PARAMETER_CMD(event->parameters); 2284 2285 if (cmd == DWC3_DEPCMD_ENDTRANSFER) { 2286 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 2287 wake_up(&dep->wait_end_transfer); 2288 } 2289 break; 2290 case DWC3_DEPEVT_RXTXFIFOEVT: 2291 break; 2292 } 2293 } 2294 2295 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2296 { 2297 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2298 spin_unlock(&dwc->lock); 2299 dwc->gadget_driver->disconnect(&dwc->gadget); 2300 spin_lock(&dwc->lock); 2301 } 2302 } 2303 2304 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2305 { 2306 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2307 spin_unlock(&dwc->lock); 2308 dwc->gadget_driver->suspend(&dwc->gadget); 2309 spin_lock(&dwc->lock); 2310 } 2311 } 2312 2313 static void dwc3_resume_gadget(struct dwc3 *dwc) 2314 { 2315 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2316 spin_unlock(&dwc->lock); 2317 dwc->gadget_driver->resume(&dwc->gadget); 2318 spin_lock(&dwc->lock); 2319 } 2320 } 2321 2322 static void dwc3_reset_gadget(struct dwc3 *dwc) 2323 { 
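	/*
	 * Notify the UDC core of the bus reset only if a gadget driver is
	 * bound and the device had actually connected (speed is known);
	 * otherwise there is nothing for the upper layers to tear down.
	 */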
2324 if (!dwc->gadget_driver) 2325 return; 2326 2327 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2328 spin_unlock(&dwc->lock); 2329 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2330 spin_lock(&dwc->lock); 2331 } 2332 } 2333 2334 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2335 { 2336 struct dwc3_ep *dep; 2337 struct dwc3_gadget_ep_cmd_params params; 2338 u32 cmd; 2339 int ret; 2340 2341 dep = dwc->eps[epnum]; 2342 2343 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || 2344 !dep->resource_index) 2345 return; 2346 2347 /* 2348 * NOTICE: We are violating what the Databook says about the 2349 * EndTransfer command. Ideally we would _always_ wait for the 2350 * EndTransfer Command Completion IRQ, but that's causing too 2351 * much trouble synchronizing between us and gadget driver. 2352 * 2353 * We have discussed this with the IP Provider and it was 2354 * suggested to giveback all requests here, but give HW some 2355 * extra time to synchronize with the interconnect. We're using 2356 * an arbitrary 100us delay for that. 2357 * 2358 * Note also that a similar handling was tested by Synopsys 2359 * (thanks a lot Paul) and nothing bad has come out of it. 2360 * In short, what we're doing is: 2361 * 2362 * - Issue EndTransfer WITH CMDIOC bit set 2363 * - Wait 100us 2364 * 2365 * As of IP version 3.10a of the DWC_usb3 IP, the controller 2366 * supports a mode to work around the above limitation. The 2367 * software can poll the CMDACT bit in the DEPCMD register 2368 * after issuing a EndTransfer command. This mode is enabled 2369 * by writing GUCTL2[14]. This polling is already done in the 2370 * dwc3_send_gadget_ep_cmd() function so if the mode is 2371 * enabled, the EndTransfer command will have completed upon 2372 * returning from this function and we don't need to delay for 2373 * 100us. 2374 * 2375 * This mode is NOT available on the DWC_usb31 IP. 2376 */ 2377 2378 cmd = DWC3_DEPCMD_ENDTRANSFER; 2379 cmd |= force ? 
			DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;

	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
		udelay(100);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int			reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions < 1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify the gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such a flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
2501 * 2502 * Currently we always use the reset value. If any platform 2503 * wants to set this to a different value, we need to add a 2504 * setting and update GCTL.RAMCLKSEL here. 2505 */ 2506 2507 switch (speed) { 2508 case DWC3_DSTS_SUPERSPEED_PLUS: 2509 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2510 dwc->gadget.ep0->maxpacket = 512; 2511 dwc->gadget.speed = USB_SPEED_SUPER_PLUS; 2512 break; 2513 case DWC3_DSTS_SUPERSPEED: 2514 /* 2515 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2516 * would cause a missing USB3 Reset event. 2517 * 2518 * In such situations, we should force a USB3 Reset 2519 * event by calling our dwc3_gadget_reset_interrupt() 2520 * routine. 2521 * 2522 * Refers to: 2523 * 2524 * STAR#9000483510: RTL: SS : USB3 reset event may 2525 * not be generated always when the link enters poll 2526 */ 2527 if (dwc->revision < DWC3_REVISION_190A) 2528 dwc3_gadget_reset_interrupt(dwc); 2529 2530 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2531 dwc->gadget.ep0->maxpacket = 512; 2532 dwc->gadget.speed = USB_SPEED_SUPER; 2533 break; 2534 case DWC3_DSTS_HIGHSPEED: 2535 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2536 dwc->gadget.ep0->maxpacket = 64; 2537 dwc->gadget.speed = USB_SPEED_HIGH; 2538 break; 2539 case DWC3_DSTS_FULLSPEED: 2540 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2541 dwc->gadget.ep0->maxpacket = 64; 2542 dwc->gadget.speed = USB_SPEED_FULL; 2543 break; 2544 case DWC3_DSTS_LOWSPEED: 2545 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2546 dwc->gadget.ep0->maxpacket = 8; 2547 dwc->gadget.speed = USB_SPEED_LOW; 2548 break; 2549 } 2550 2551 /* Enable USB2 LPM Capability */ 2552 2553 if ((dwc->revision > DWC3_REVISION_194A) && 2554 (speed != DWC3_DSTS_SUPERSPEED) && 2555 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) { 2556 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2557 reg |= DWC3_DCFG_LPM_CAP; 2558 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2559 2560 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2561 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2562 2563 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2564 2565 /* 2566 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2567 * DCFG.LPMCap is set, core responses with an ACK and the 2568 * BESL value in the LPM token is less than or equal to LPM 2569 * NYET threshold. 2570 */ 2571 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2572 && dwc->has_lpm_erratum, 2573 "LPM Erratum not available on dwc3 revisions < 2.40a\n"); 2574 2575 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2576 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2577 2578 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2579 } else { 2580 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2581 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2582 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2583 } 2584 2585 dep = dwc->eps[0]; 2586 ret = __dwc3_gadget_ep_enable(dep, true, false); 2587 if (ret) { 2588 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2589 return; 2590 } 2591 2592 dep = dwc->eps[1]; 2593 ret = __dwc3_gadget_ep_enable(dep, true, false); 2594 if (ret) { 2595 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2596 return; 2597 } 2598 2599 /* 2600 * Configure PHY via GUSB3PIPECTLn if required. 2601 * 2602 * Update GTXFIFOSIZn 2603 * 2604 * In both cases reset values should be sufficient. 2605 */ 2606 } 2607 2608 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2609 { 2610 /* 2611 * TODO take core out of low power mode when that's 2612 * implemented. 
2613 */ 2614 2615 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2616 spin_unlock(&dwc->lock); 2617 dwc->gadget_driver->resume(&dwc->gadget); 2618 spin_lock(&dwc->lock); 2619 } 2620 } 2621 2622 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2623 unsigned int evtinfo) 2624 { 2625 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2626 unsigned int pwropt; 2627 2628 /* 2629 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2630 * Hibernation mode enabled which would show up when device detects 2631 * host-initiated U3 exit. 2632 * 2633 * In that case, device will generate a Link State Change Interrupt 2634 * from U3 to RESUME which is only necessary if Hibernation is 2635 * configured in. 2636 * 2637 * There are no functional changes due to such spurious event and we 2638 * just need to ignore it. 2639 * 2640 * Refers to: 2641 * 2642 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation 2643 * operational mode 2644 */ 2645 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 2646 if ((dwc->revision < DWC3_REVISION_250A) && 2647 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { 2648 if ((dwc->link_state == DWC3_LINK_STATE_U3) && 2649 (next == DWC3_LINK_STATE_RESUME)) { 2650 return; 2651 } 2652 } 2653 2654 /* 2655 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 2656 * on the link partner, the USB session might do multiple entry/exit 2657 * of low power states before a transfer takes place. 2658 * 2659 * Due to this problem, we might experience lower throughput. The 2660 * suggested workaround is to disable DCTL[12:9] bits if we're 2661 * transitioning from U1/U2 to U0 and enable those bits again 2662 * after a transfer completes and there are no pending transfers 2663 * on any of the enabled endpoints. 2664 * 2665 * This is the first half of that workaround. 
2666 * 2667 * Refers to: 2668 * 2669 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 2670 * core send LGO_Ux entering U0 2671 */ 2672 if (dwc->revision < DWC3_REVISION_183A) { 2673 if (next == DWC3_LINK_STATE_U0) { 2674 u32 u1u2; 2675 u32 reg; 2676 2677 switch (dwc->link_state) { 2678 case DWC3_LINK_STATE_U1: 2679 case DWC3_LINK_STATE_U2: 2680 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2681 u1u2 = reg & (DWC3_DCTL_INITU2ENA 2682 | DWC3_DCTL_ACCEPTU2ENA 2683 | DWC3_DCTL_INITU1ENA 2684 | DWC3_DCTL_ACCEPTU1ENA); 2685 2686 if (!dwc->u1u2) 2687 dwc->u1u2 = reg & u1u2; 2688 2689 reg &= ~u1u2; 2690 2691 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2692 break; 2693 default: 2694 /* do nothing */ 2695 break; 2696 } 2697 } 2698 } 2699 2700 switch (next) { 2701 case DWC3_LINK_STATE_U1: 2702 if (dwc->speed == USB_SPEED_SUPER) 2703 dwc3_suspend_gadget(dwc); 2704 break; 2705 case DWC3_LINK_STATE_U2: 2706 case DWC3_LINK_STATE_U3: 2707 dwc3_suspend_gadget(dwc); 2708 break; 2709 case DWC3_LINK_STATE_RESUME: 2710 dwc3_resume_gadget(dwc); 2711 break; 2712 default: 2713 /* do nothing */ 2714 break; 2715 } 2716 2717 dwc->link_state = next; 2718 } 2719 2720 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, 2721 unsigned int evtinfo) 2722 { 2723 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2724 2725 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) 2726 dwc3_suspend_gadget(dwc); 2727 2728 dwc->link_state = next; 2729 } 2730 2731 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 2732 unsigned int evtinfo) 2733 { 2734 unsigned int is_ss = evtinfo & BIT(4); 2735 2736 /** 2737 * WORKAROUND: DWC3 revison 2.20a with hibernation support 2738 * have a known issue which can cause USB CV TD.9.23 to fail 2739 * randomly. 2740 * 2741 * Because of this issue, core could generate bogus hibernation 2742 * events which SW needs to ignore. 2743 * 2744 * Refers to: 2745 * 2746 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2747 * Device Fallback from SuperSpeed 2748 */ 2749 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2750 return; 2751 2752 /* enter hibernation here */ 2753 } 2754 2755 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2756 const struct dwc3_event_devt *event) 2757 { 2758 switch (event->type) { 2759 case DWC3_DEVICE_EVENT_DISCONNECT: 2760 dwc3_gadget_disconnect_interrupt(dwc); 2761 break; 2762 case DWC3_DEVICE_EVENT_RESET: 2763 dwc3_gadget_reset_interrupt(dwc); 2764 break; 2765 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2766 dwc3_gadget_conndone_interrupt(dwc); 2767 break; 2768 case DWC3_DEVICE_EVENT_WAKEUP: 2769 dwc3_gadget_wakeup_interrupt(dwc); 2770 break; 2771 case DWC3_DEVICE_EVENT_HIBER_REQ: 2772 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 2773 "unexpected hibernation event\n")) 2774 break; 2775 2776 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 2777 break; 2778 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2779 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2780 break; 2781 case DWC3_DEVICE_EVENT_EOPF: 2782 /* It changed to be suspend event for version 2.30a and above */ 2783 if (dwc->revision >= DWC3_REVISION_230A) { 2784 /* 2785 * Ignore suspend event until the gadget enters into 2786 * USB_STATE_CONFIGURED state. 
2787 */ 2788 if (dwc->gadget.state >= USB_STATE_CONFIGURED) 2789 dwc3_gadget_suspend_interrupt(dwc, 2790 event->event_info); 2791 } 2792 break; 2793 case DWC3_DEVICE_EVENT_SOF: 2794 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2795 case DWC3_DEVICE_EVENT_CMD_CMPL: 2796 case DWC3_DEVICE_EVENT_OVERFLOW: 2797 break; 2798 default: 2799 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2800 } 2801 } 2802 2803 static void dwc3_process_event_entry(struct dwc3 *dwc, 2804 const union dwc3_event *event) 2805 { 2806 trace_dwc3_event(event->raw, dwc); 2807 2808 /* Endpoint IRQ, handle it and return early */ 2809 if (event->type.is_devspec == 0) { 2810 /* depevt */ 2811 return dwc3_endpoint_interrupt(dwc, &event->depevt); 2812 } 2813 2814 switch (event->type.type) { 2815 case DWC3_EVENT_TYPE_DEV: 2816 dwc3_gadget_interrupt(dwc, &event->devt); 2817 break; 2818 /* REVISIT what to do with Carkit and I2C events ? */ 2819 default: 2820 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2821 } 2822 } 2823 2824 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) 2825 { 2826 struct dwc3 *dwc = evt->dwc; 2827 irqreturn_t ret = IRQ_NONE; 2828 int left; 2829 u32 reg; 2830 2831 left = evt->count; 2832 2833 if (!(evt->flags & DWC3_EVENT_PENDING)) 2834 return IRQ_NONE; 2835 2836 while (left > 0) { 2837 union dwc3_event event; 2838 2839 event.raw = *(u32 *) (evt->cache + evt->lpos); 2840 2841 dwc3_process_event_entry(dwc, &event); 2842 2843 /* 2844 * FIXME we wrap around correctly to the next entry as 2845 * almost all entries are 4 bytes in size. There is one 2846 * entry which has 12 bytes which is a regular entry 2847 * followed by 8 bytes data. ATM I don't know how 2848 * things are organized if we get next to the a 2849 * boundary so I worry about that once we try to handle 2850 * that. 
2851 */ 2852 evt->lpos = (evt->lpos + 4) % evt->length; 2853 left -= 4; 2854 } 2855 2856 evt->count = 0; 2857 evt->flags &= ~DWC3_EVENT_PENDING; 2858 ret = IRQ_HANDLED; 2859 2860 /* Unmask interrupt */ 2861 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 2862 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2863 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 2864 2865 if (dwc->imod_interval) { 2866 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 2867 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 2868 } 2869 2870 return ret; 2871 } 2872 2873 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) 2874 { 2875 struct dwc3_event_buffer *evt = _evt; 2876 struct dwc3 *dwc = evt->dwc; 2877 unsigned long flags; 2878 irqreturn_t ret = IRQ_NONE; 2879 2880 spin_lock_irqsave(&dwc->lock, flags); 2881 ret = dwc3_process_event_buf(evt); 2882 spin_unlock_irqrestore(&dwc->lock, flags); 2883 2884 return ret; 2885 } 2886 2887 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) 2888 { 2889 struct dwc3 *dwc = evt->dwc; 2890 u32 amount; 2891 u32 count; 2892 u32 reg; 2893 2894 if (pm_runtime_suspended(dwc->dev)) { 2895 pm_runtime_get(dwc->dev); 2896 disable_irq_nosync(dwc->irq_gadget); 2897 dwc->pending_events = true; 2898 return IRQ_HANDLED; 2899 } 2900 2901 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 2902 count &= DWC3_GEVNTCOUNT_MASK; 2903 if (!count) 2904 return IRQ_NONE; 2905 2906 evt->count = count; 2907 evt->flags |= DWC3_EVENT_PENDING; 2908 2909 /* Mask interrupt */ 2910 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 2911 reg |= DWC3_GEVNTSIZ_INTMASK; 2912 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 2913 2914 amount = min(count, evt->length - evt->lpos); 2915 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); 2916 2917 if (amount < count) 2918 memcpy(evt->cache, evt->buf, count - amount); 2919 2920 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); 2921 2922 return IRQ_WAKE_THREAD; 2923 } 2924 2925 static irqreturn_t dwc3_interrupt(int irq, void *_evt) 2926 { 2927 struct dwc3_event_buffer *evt = _evt; 2928 2929 return dwc3_check_event_buf(evt); 2930 } 2931 2932 static int dwc3_gadget_get_irq(struct dwc3 *dwc) 2933 { 2934 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 2935 int irq; 2936 2937 irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); 2938 if (irq > 0) 2939 goto out; 2940 2941 if (irq == -EPROBE_DEFER) 2942 goto out; 2943 2944 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); 2945 if (irq > 0) 2946 goto out; 2947 2948 if (irq == -EPROBE_DEFER) 2949 goto out; 2950 2951 irq = platform_get_irq(dwc3_pdev, 0); 2952 if (irq > 0) 2953 goto out; 2954 2955 if (irq != -EPROBE_DEFER) 2956 dev_err(dwc->dev, "missing peripheral IRQ\n"); 2957 2958 if (!irq) 2959 irq = -EINVAL; 2960 2961 out: 2962 return irq; 2963 } 2964 2965 /** 2966 * dwc3_gadget_init - Initializes gadget related registers 2967 * @dwc: pointer to our controller context structure 2968 * 2969 * Returns 0 on success otherwise negative errno. 
2970 */ 2971 int dwc3_gadget_init(struct dwc3 *dwc) 2972 { 2973 int ret; 2974 int irq; 2975 2976 irq = dwc3_gadget_get_irq(dwc); 2977 if (irq < 0) { 2978 ret = irq; 2979 goto err0; 2980 } 2981 2982 dwc->irq_gadget = irq; 2983 2984 dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), 2985 &dwc->ctrl_req_addr, GFP_KERNEL); 2986 if (!dwc->ctrl_req) { 2987 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2988 ret = -ENOMEM; 2989 goto err0; 2990 } 2991 2992 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 2993 sizeof(*dwc->ep0_trb) * 2, 2994 &dwc->ep0_trb_addr, GFP_KERNEL); 2995 if (!dwc->ep0_trb) { 2996 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2997 ret = -ENOMEM; 2998 goto err1; 2999 } 3000 3001 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 3002 if (!dwc->setup_buf) { 3003 ret = -ENOMEM; 3004 goto err2; 3005 } 3006 3007 dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev, 3008 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 3009 GFP_KERNEL); 3010 if (!dwc->ep0_bounce) { 3011 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 3012 ret = -ENOMEM; 3013 goto err3; 3014 } 3015 3016 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL); 3017 if (!dwc->zlp_buf) { 3018 ret = -ENOMEM; 3019 goto err4; 3020 } 3021 3022 init_completion(&dwc->ep0_in_setup); 3023 3024 dwc->gadget.ops = &dwc3_gadget_ops; 3025 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3026 dwc->gadget.sg_supported = true; 3027 dwc->gadget.name = "dwc3-gadget"; 3028 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; 3029 3030 /* 3031 * FIXME We might be setting max_speed to <SUPER, however versions 3032 * <2.20a of dwc3 have an issue with metastability (documented 3033 * elsewhere in this driver) which tells us we can't set max speed to 3034 * anything lower than SUPER. 3035 * 3036 * Because gadget.max_speed is only used by composite.c and function 3037 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3038 * to happen so we avoid sending SuperSpeed Capability descriptor 3039 * together with our BOS descriptor as that could confuse host into 3040 * thinking we can handle super speed. 3041 * 3042 * Note that, in fact, we won't even support GetBOS requests when speed 3043 * is less than super speed because we don't have means, yet, to tell 3044 * composite.c that we are USB 2.0 + LPM ECN. 3045 */ 3046 if (dwc->revision < DWC3_REVISION_220A) 3047 dev_info(dwc->dev, "changing max_speed on rev %08x\n", 3048 dwc->revision); 3049 3050 dwc->gadget.max_speed = dwc->maximum_speed; 3051 3052 /* 3053 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 3054 * on ep out. 3055 */ 3056 dwc->gadget.quirk_ep_out_aligned_size = true; 3057 3058 /* 3059 * REVISIT: Here we should clear all pending IRQs to be 3060 * sure we're starting from a well known location. 
3061 */ 3062 3063 ret = dwc3_gadget_init_endpoints(dwc); 3064 if (ret) 3065 goto err5; 3066 3067 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3068 if (ret) { 3069 dev_err(dwc->dev, "failed to register udc\n"); 3070 goto err5; 3071 } 3072 3073 return 0; 3074 3075 err5: 3076 kfree(dwc->zlp_buf); 3077 3078 err4: 3079 dwc3_gadget_free_endpoints(dwc); 3080 dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, 3081 dwc->ep0_bounce, dwc->ep0_bounce_addr); 3082 3083 err3: 3084 kfree(dwc->setup_buf); 3085 3086 err2: 3087 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3088 dwc->ep0_trb, dwc->ep0_trb_addr); 3089 3090 err1: 3091 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), 3092 dwc->ctrl_req, dwc->ctrl_req_addr); 3093 3094 err0: 3095 return ret; 3096 } 3097 3098 /* -------------------------------------------------------------------------- */ 3099 3100 void dwc3_gadget_exit(struct dwc3 *dwc) 3101 { 3102 usb_del_gadget_udc(&dwc->gadget); 3103 3104 dwc3_gadget_free_endpoints(dwc); 3105 3106 dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, 3107 dwc->ep0_bounce, dwc->ep0_bounce_addr); 3108 3109 kfree(dwc->setup_buf); 3110 kfree(dwc->zlp_buf); 3111 3112 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3113 dwc->ep0_trb, dwc->ep0_trb_addr); 3114 3115 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), 3116 dwc->ctrl_req, dwc->ctrl_req_addr); 3117 } 3118 3119 int dwc3_gadget_suspend(struct dwc3 *dwc) 3120 { 3121 int ret; 3122 3123 if (!dwc->gadget_driver) 3124 return 0; 3125 3126 ret = dwc3_gadget_run_stop(dwc, false, false); 3127 if (ret < 0) 3128 return ret; 3129 3130 dwc3_disconnect_gadget(dwc); 3131 __dwc3_gadget_stop(dwc); 3132 3133 return 0; 3134 } 3135 3136 int dwc3_gadget_resume(struct dwc3 *dwc) 3137 { 3138 int ret; 3139 3140 if (!dwc->gadget_driver) 3141 return 0; 3142 3143 ret = __dwc3_gadget_start(dwc); 3144 if (ret < 0) 3145 goto err0; 3146 3147 ret = dwc3_gadget_run_stop(dwc, true, false); 3148 if (ret < 0) 3149 goto err1; 3150 3151 return 0; 3152 3153 err1: 3154 __dwc3_gadget_stop(dwc); 3155 3156 err0: 3157 return ret; 3158 } 3159 3160 void dwc3_gadget_process_pending_events(struct dwc3 *dwc) 3161 { 3162 if (dwc->pending_events) { 3163 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); 3164 dwc->pending_events = false; 3165 enable_irq(dwc->irq_gadget); 3166 } 3167 } 3168