1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dwc3_trace(trace_dwc3_gadget,
			"link state change request timed out");

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb() - Increment a TRB index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	req->started = false;
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);

	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32 timeout = 500;
	int status = 0;
	int ret = 0;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);	/* pre-decrement so 'timeout' is 0 on expiry */

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3 *dwc = dep->dwc;
	u32 timeout = 500;
	u32 reg;

	int cmd_status = 0;
	int susphy = false;
	int ret = -EINVAL;

	/*
	 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
	 * we're issuing an endpoint command, we must check if
	 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
248 * 249 * We will also set SUSPHY bit to what it was before returning as stated 250 * by the same section on Synopsys databook. 251 */ 252 if (dwc->gadget.speed <= USB_SPEED_HIGH) { 253 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 254 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 255 susphy = true; 256 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 257 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 258 } 259 } 260 261 if (cmd == DWC3_DEPCMD_STARTTRANSFER) { 262 int needs_wakeup; 263 264 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || 265 dwc->link_state == DWC3_LINK_STATE_U2 || 266 dwc->link_state == DWC3_LINK_STATE_U3); 267 268 if (unlikely(needs_wakeup)) { 269 ret = __dwc3_gadget_wakeup(dwc); 270 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 271 ret); 272 } 273 } 274 275 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 276 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 277 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 278 279 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT); 280 do { 281 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 282 if (!(reg & DWC3_DEPCMD_CMDACT)) { 283 cmd_status = DWC3_DEPCMD_STATUS(reg); 284 285 switch (cmd_status) { 286 case 0: 287 ret = 0; 288 break; 289 case DEPEVT_TRANSFER_NO_RESOURCE: 290 ret = -EINVAL; 291 break; 292 case DEPEVT_TRANSFER_BUS_EXPIRY: 293 /* 294 * SW issues START TRANSFER command to 295 * isochronous ep with future frame interval. If 296 * future interval time has already passed when 297 * core receives the command, it will respond 298 * with an error status of 'Bus Expiry'. 299 * 300 * Instead of always returning -EINVAL, let's 301 * give a hint to the gadget driver that this is 302 * the case by returning -EAGAIN. 303 */ 304 ret = -EAGAIN; 305 break; 306 default: 307 dev_WARN(dwc->dev, "UNKNOWN cmd status\n"); 308 } 309 310 break; 311 } 312 } while (--timeout); 313 314 if (timeout == 0) { 315 ret = -ETIMEDOUT; 316 cmd_status = -ETIMEDOUT; 317 } 318 319 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); 320 321 if (unlikely(susphy)) { 322 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 323 reg |= DWC3_GUSB2PHYCFG_SUSPHY; 324 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 325 } 326 327 return ret; 328 } 329 330 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) 331 { 332 struct dwc3 *dwc = dep->dwc; 333 struct dwc3_gadget_ep_cmd_params params; 334 u32 cmd = DWC3_DEPCMD_CLEARSTALL; 335 336 /* 337 * As of core revision 2.60a the recommended programming model 338 * is to set the ClearPendIN bit when issuing a Clear Stall EP 339 * command for IN endpoints. This is to prevent an issue where 340 * some (non-compliant) hosts may not send ACK TPs for pending 341 * IN transfers due to a mishandled error condition. Synopsys 342 * STAR 9000614252. 
	 */
	if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
	    (dwc->gadget.speed >= USB_SPEED_SUPER))
		cmd |= DWC3_DEPCMD_CLEARPENDIN;

	memset(&params, 0, sizeof(params));

	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);

/**
 * dwc3_gadget_start_config - Configure EP resources
 * @dwc: pointer to our controller context structure
 * @dep: endpoint that is being enabled
 *
 * The assignment of transfer resources cannot perfectly follow the
 * data book due to the fact that the controller driver does not have
 * all knowledge of the configuration in advance. It is given this
 * information piecemeal by the composite gadget framework after every
 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
 * programming model in this scenario can cause errors, for two
 * reasons:
 *
 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
 * multiple interfaces.
 *
 * 2) The databook does not mention doing more DEPXFERCFG for new
 * endpoint on alt setting (8.1.6).
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this
 * setting will stay persistent until either a core reset or
 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
 * do DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled
 * but is triggered only when called for EP0-out, which always happens
 * first, and which should only happen in one of the above conditions.
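 *
 * (Illustrative note, not from the databook: under this scheme, enabling
 * EP0-out issues a single DEPSTARTCFG with transfer resource index 0 and
 * then one DEPXFERCFG per hardware endpoint, so later SET_CONFIGURATION
 * or SET_INTERFACE requests never need to touch the transfer resources
 * again.)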
 */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int i;
	int ret;

	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dep = dwc->eps[i];

		if (!dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool modify, bool restore)
{
	struct dwc3_gadget_ep_cmd_params params;

	if (dev_WARN_ONCE(dwc->dev, modify && restore,
					"Can't modify and restore\n"))
		return -EINVAL;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	if (modify) {
		params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
	} else if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	} else {
		params.param0 |= DWC3_DEPCFG_ACTION_INIT;
	}

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is physical endpoint 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 * @comp_desc: USB SuperSpeed Endpoint Companion Descriptor, if any
 * @modify: true if an already-configured endpoint is being modified
 * @restore: true if a previously saved endpoint state is being restored
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool modify, bool restore)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret;

	dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
			restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (usb_endpoint_xfer_control(desc))
			return 0;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	dwc3_stop_active_transfer(dwc, dep->number, true);

	/* giveback all requests to gadget driver */
	while (!list_empty(&dep->started_list)) {
		req = next_request(&dep->started_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->pending_list)) {
		req = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
629 */ 630 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 631 { 632 struct dwc3 *dwc = dep->dwc; 633 u32 reg; 634 635 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name); 636 637 dwc3_remove_requests(dwc, dep); 638 639 /* make sure HW endpoint isn't stalled */ 640 if (dep->flags & DWC3_EP_STALL) 641 __dwc3_gadget_ep_set_halt(dep, 0, false); 642 643 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 644 reg &= ~DWC3_DALEPENA_EP(dep->number); 645 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 646 647 dep->stream_capable = false; 648 dep->endpoint.desc = NULL; 649 dep->comp_desc = NULL; 650 dep->type = 0; 651 dep->flags = 0; 652 653 return 0; 654 } 655 656 /* -------------------------------------------------------------------------- */ 657 658 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 659 const struct usb_endpoint_descriptor *desc) 660 { 661 return -EINVAL; 662 } 663 664 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 665 { 666 return -EINVAL; 667 } 668 669 /* -------------------------------------------------------------------------- */ 670 671 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 672 const struct usb_endpoint_descriptor *desc) 673 { 674 struct dwc3_ep *dep; 675 struct dwc3 *dwc; 676 unsigned long flags; 677 int ret; 678 679 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 680 pr_debug("dwc3: invalid parameters\n"); 681 return -EINVAL; 682 } 683 684 if (!desc->wMaxPacketSize) { 685 pr_debug("dwc3: missing wMaxPacketSize\n"); 686 return -EINVAL; 687 } 688 689 dep = to_dwc3_ep(ep); 690 dwc = dep->dwc; 691 692 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 693 "%s is already enabled\n", 694 dep->name)) 695 return 0; 696 697 spin_lock_irqsave(&dwc->lock, flags); 698 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 699 spin_unlock_irqrestore(&dwc->lock, flags); 700 701 return ret; 702 } 703 704 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 705 { 706 struct dwc3_ep *dep; 707 struct dwc3 *dwc; 708 unsigned long flags; 709 int ret; 710 711 if (!ep) { 712 pr_debug("dwc3: invalid parameters\n"); 713 return -EINVAL; 714 } 715 716 dep = to_dwc3_ep(ep); 717 dwc = dep->dwc; 718 719 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 720 "%s is already disabled\n", 721 dep->name)) 722 return 0; 723 724 spin_lock_irqsave(&dwc->lock, flags); 725 ret = __dwc3_gadget_ep_disable(dep); 726 spin_unlock_irqrestore(&dwc->lock, flags); 727 728 return ret; 729 } 730 731 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 732 gfp_t gfp_flags) 733 { 734 struct dwc3_request *req; 735 struct dwc3_ep *dep = to_dwc3_ep(ep); 736 737 req = kzalloc(sizeof(*req), gfp_flags); 738 if (!req) 739 return NULL; 740 741 req->epnum = dep->number; 742 req->dep = dep; 743 744 dep->allocated_requests++; 745 746 trace_dwc3_alloc_request(req); 747 748 return &req->request; 749 } 750 751 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 752 struct usb_request *request) 753 { 754 struct dwc3_request *req = to_dwc3_request(request); 755 struct dwc3_ep *dep = to_dwc3_ep(ep); 756 757 dep->allocated_requests--; 758 trace_dwc3_free_request(req); 759 kfree(req); 760 } 761 762 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep); 763 764 /** 765 * dwc3_prepare_one_trb - setup one TRB from one request 766 * @dep: endpoint for which this request is prepared 767 * @req: dwc3_request pointer 768 */ 769 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 770 struct dwc3_request *req, dma_addr_t dma, 771 unsigned length, 
		unsigned chain, unsigned node)
{
	struct dwc3_trb *trb;

	dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
			dep->name, req, (unsigned long long) dma,
			length, chain ? " chain" : "");

	trb = &dep->trb_pool[dep->trb_enqueue];

	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->first_trb_index = dep->trb_enqueue;
		dep->queued_requests++;
	}

	dwc3_ep_inc_enq(dep);

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	/* always enable Continue on Short Packet */
	trb->ctrl |= DWC3_TRB_CTRL_CSP;

	if ((!req->request.no_interrupt && !chain) ||
			(dwc3_calc_trbs_left(dep) == 0))
		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}

/**
 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[tmp - 1];
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	struct dwc3_trb *tmp;
	u8 trbs_left;

	/*
	 * If enqueue & dequeue are equal then it is either full or empty.
	 *
	 * One way to know for sure is if the TRB right before us has HWO bit
	 * set or not. If it has, then we're definitely full and can't fit any
	 * more transfers in our ring.
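	 *
	 * (Worked example, derived from the code below rather than the
	 * databook: when enqueue == dequeue and that previous TRB's HWO bit
	 * is clear, the ring is considered empty and DWC3_TRB_NUM - 1 TRBs
	 * are usable -- every slot except the link TRB at the end of the
	 * pool.)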
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	/*
	 * When the free span wraps past the end of the pool it includes the
	 * link TRB slot, which can't hold a transfer, so drop one.
	 */
	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}

static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	unsigned int length;
	dma_addr_t dma;
	int i;

	for_each_sg(sg, s, req->num_pending_sgs, i) {
		unsigned chain = true;

		length = sg_dma_len(s);
		dma = sg_dma_address(s);

		if (sg_is_last(s))
			chain = false;

		dwc3_prepare_one_trb(dep, req, dma, length,
				chain, i);

		if (!dwc3_calc_trbs_left(dep))
			break;
	}
}

static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int length;
	dma_addr_t dma;

	dma = req->request.dma;
	length = req->request.length;

	dwc3_prepare_one_trb(dep, req, dma, length,
			false, 0);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	if (!dwc3_calc_trbs_left(dep))
		return;

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int starting;
	int ret;
	u32 cmd;

	starting = !(dep->flags & DWC3_EP_BUSY);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER |
			DWC3_DEPCMD_PARAM(cmd_param);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
991 */ 992 usb_gadget_unmap_request(&dwc->gadget, &req->request, 993 req->direction); 994 list_del(&req->list); 995 return ret; 996 } 997 998 dep->flags |= DWC3_EP_BUSY; 999 1000 if (starting) { 1001 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); 1002 WARN_ON_ONCE(!dep->resource_index); 1003 } 1004 1005 return 0; 1006 } 1007 1008 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1009 struct dwc3_ep *dep, u32 cur_uf) 1010 { 1011 u32 uf; 1012 1013 if (list_empty(&dep->pending_list)) { 1014 dwc3_trace(trace_dwc3_gadget, 1015 "ISOC ep %s run out for requests", 1016 dep->name); 1017 dep->flags |= DWC3_EP_PENDING_REQUEST; 1018 return; 1019 } 1020 1021 /* 4 micro frames in the future */ 1022 uf = cur_uf + dep->interval * 4; 1023 1024 __dwc3_gadget_kick_transfer(dep, uf); 1025 } 1026 1027 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1028 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1029 { 1030 u32 cur_uf, mask; 1031 1032 mask = ~(dep->interval - 1); 1033 cur_uf = event->parameters & mask; 1034 1035 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1036 } 1037 1038 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1039 { 1040 struct dwc3 *dwc = dep->dwc; 1041 int ret; 1042 1043 if (!dep->endpoint.desc) { 1044 dwc3_trace(trace_dwc3_gadget, 1045 "trying to queue request %p to disabled %s", 1046 &req->request, dep->endpoint.name); 1047 return -ESHUTDOWN; 1048 } 1049 1050 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1051 &req->request, req->dep->name)) { 1052 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'", 1053 &req->request, req->dep->name); 1054 return -EINVAL; 1055 } 1056 1057 pm_runtime_get(dwc->dev); 1058 1059 req->request.actual = 0; 1060 req->request.status = -EINPROGRESS; 1061 req->direction = dep->direction; 1062 req->epnum = dep->number; 1063 1064 trace_dwc3_ep_queue(req); 1065 1066 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1067 dep->direction); 1068 if (ret) 1069 return ret; 1070 1071 req->sg = req->request.sg; 1072 req->num_pending_sgs = req->request.num_mapped_sgs; 1073 1074 list_add_tail(&req->list, &dep->pending_list); 1075 1076 /* 1077 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must 1078 * wait for a XferNotReady event so we will know what's the current 1079 * (micro-)frame number. 1080 * 1081 * Without this trick, we are very, very likely gonna get Bus Expiry 1082 * errors which will force us issue EndTransfer command. 
1083 */ 1084 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1085 if ((dep->flags & DWC3_EP_PENDING_REQUEST) && 1086 list_empty(&dep->started_list)) { 1087 dwc3_stop_active_transfer(dwc, dep->number, true); 1088 dep->flags = DWC3_EP_ENABLED; 1089 } 1090 return 0; 1091 } 1092 1093 if (!dwc3_calc_trbs_left(dep)) 1094 return 0; 1095 1096 ret = __dwc3_gadget_kick_transfer(dep, 0); 1097 if (ret && ret != -EBUSY) 1098 dwc3_trace(trace_dwc3_gadget, 1099 "%s: failed to kick transfers", 1100 dep->name); 1101 if (ret == -EBUSY) 1102 ret = 0; 1103 1104 return ret; 1105 } 1106 1107 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep, 1108 struct usb_request *request) 1109 { 1110 dwc3_gadget_ep_free_request(ep, request); 1111 } 1112 1113 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) 1114 { 1115 struct dwc3_request *req; 1116 struct usb_request *request; 1117 struct usb_ep *ep = &dep->endpoint; 1118 1119 dwc3_trace(trace_dwc3_gadget, "queueing ZLP"); 1120 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 1121 if (!request) 1122 return -ENOMEM; 1123 1124 request->length = 0; 1125 request->buf = dwc->zlp_buf; 1126 request->complete = __dwc3_gadget_ep_zlp_complete; 1127 1128 req = to_dwc3_request(request); 1129 1130 return __dwc3_gadget_ep_queue(dep, req); 1131 } 1132 1133 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1134 gfp_t gfp_flags) 1135 { 1136 struct dwc3_request *req = to_dwc3_request(request); 1137 struct dwc3_ep *dep = to_dwc3_ep(ep); 1138 struct dwc3 *dwc = dep->dwc; 1139 1140 unsigned long flags; 1141 1142 int ret; 1143 1144 spin_lock_irqsave(&dwc->lock, flags); 1145 ret = __dwc3_gadget_ep_queue(dep, req); 1146 1147 /* 1148 * Okay, here's the thing, if gadget driver has requested for a ZLP by 1149 * setting request->zero, instead of doing magic, we will just queue an 1150 * extra usb_request ourselves so that it gets handled the same way as 1151 * any other request. 
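	 *
	 * (Example, for illustration only: a bulk IN request whose length is
	 * an exact multiple of wMaxPacketSize and which has request->zero set
	 * gets one extra zero-length usb_request queued right behind it by
	 * the check below.)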
	 */
	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0))
		ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->started_list, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned transfer_in_flight;
		unsigned started;

		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			dwc3_trace(trace_dwc3_gadget,
					"%s: pending request, cannot halt",
					dep->name);
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret =
__dwc3_gadget_ep_set_halt(dep, 1, false); 1294 spin_unlock_irqrestore(&dwc->lock, flags); 1295 1296 return ret; 1297 } 1298 1299 /* -------------------------------------------------------------------------- */ 1300 1301 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1302 .bLength = USB_DT_ENDPOINT_SIZE, 1303 .bDescriptorType = USB_DT_ENDPOINT, 1304 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1305 }; 1306 1307 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1308 .enable = dwc3_gadget_ep0_enable, 1309 .disable = dwc3_gadget_ep0_disable, 1310 .alloc_request = dwc3_gadget_ep_alloc_request, 1311 .free_request = dwc3_gadget_ep_free_request, 1312 .queue = dwc3_gadget_ep0_queue, 1313 .dequeue = dwc3_gadget_ep_dequeue, 1314 .set_halt = dwc3_gadget_ep0_set_halt, 1315 .set_wedge = dwc3_gadget_ep_set_wedge, 1316 }; 1317 1318 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1319 .enable = dwc3_gadget_ep_enable, 1320 .disable = dwc3_gadget_ep_disable, 1321 .alloc_request = dwc3_gadget_ep_alloc_request, 1322 .free_request = dwc3_gadget_ep_free_request, 1323 .queue = dwc3_gadget_ep_queue, 1324 .dequeue = dwc3_gadget_ep_dequeue, 1325 .set_halt = dwc3_gadget_ep_set_halt, 1326 .set_wedge = dwc3_gadget_ep_set_wedge, 1327 }; 1328 1329 /* -------------------------------------------------------------------------- */ 1330 1331 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1332 { 1333 struct dwc3 *dwc = gadget_to_dwc(g); 1334 u32 reg; 1335 1336 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1337 return DWC3_DSTS_SOFFN(reg); 1338 } 1339 1340 static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1341 { 1342 int retries; 1343 1344 int ret; 1345 u32 reg; 1346 1347 u8 link_state; 1348 u8 speed; 1349 1350 /* 1351 * According to the Databook Remote wakeup request should 1352 * be issued only when the device is in early suspend state. 1353 * 1354 * We can check that via USB Link State bits in DSTS register. 
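	 *
	 * (Note derived from the switch statement below, not from the
	 * databook text itself: the only link states accepted here are
	 * RX_DET and U3, i.e. Early Suspend and Suspend in high-speed
	 * terms.)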
1355 */ 1356 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1357 1358 speed = reg & DWC3_DSTS_CONNECTSPD; 1359 if ((speed == DWC3_DSTS_SUPERSPEED) || 1360 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { 1361 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed"); 1362 return 0; 1363 } 1364 1365 link_state = DWC3_DSTS_USBLNKST(reg); 1366 1367 switch (link_state) { 1368 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1369 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1370 break; 1371 default: 1372 dwc3_trace(trace_dwc3_gadget, 1373 "can't wakeup from '%s'", 1374 dwc3_gadget_link_string(link_state)); 1375 return -EINVAL; 1376 } 1377 1378 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1379 if (ret < 0) { 1380 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1381 return ret; 1382 } 1383 1384 /* Recent versions do this automatically */ 1385 if (dwc->revision < DWC3_REVISION_194A) { 1386 /* write zeroes to Link Change Request */ 1387 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1388 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1389 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1390 } 1391 1392 /* poll until Link State changes to ON */ 1393 retries = 20000; 1394 1395 while (retries--) { 1396 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1397 1398 /* in HS, means ON */ 1399 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1400 break; 1401 } 1402 1403 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1404 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1405 return -EINVAL; 1406 } 1407 1408 return 0; 1409 } 1410 1411 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1412 { 1413 struct dwc3 *dwc = gadget_to_dwc(g); 1414 unsigned long flags; 1415 int ret; 1416 1417 spin_lock_irqsave(&dwc->lock, flags); 1418 ret = __dwc3_gadget_wakeup(dwc); 1419 spin_unlock_irqrestore(&dwc->lock, flags); 1420 1421 return ret; 1422 } 1423 1424 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1425 int is_selfpowered) 1426 { 1427 struct dwc3 *dwc = gadget_to_dwc(g); 1428 unsigned long flags; 1429 1430 spin_lock_irqsave(&dwc->lock, flags); 1431 g->is_selfpowered = !!is_selfpowered; 1432 spin_unlock_irqrestore(&dwc->lock, flags); 1433 1434 return 0; 1435 } 1436 1437 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1438 { 1439 u32 reg; 1440 u32 timeout = 500; 1441 1442 if (pm_runtime_suspended(dwc->dev)) 1443 return 0; 1444 1445 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1446 if (is_on) { 1447 if (dwc->revision <= DWC3_REVISION_187A) { 1448 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1449 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1450 } 1451 1452 if (dwc->revision >= DWC3_REVISION_194A) 1453 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1454 reg |= DWC3_DCTL_RUN_STOP; 1455 1456 if (dwc->has_hibernation) 1457 reg |= DWC3_DCTL_KEEP_CONNECT; 1458 1459 dwc->pullups_connected = true; 1460 } else { 1461 reg &= ~DWC3_DCTL_RUN_STOP; 1462 1463 if (dwc->has_hibernation && !suspend) 1464 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1465 1466 dwc->pullups_connected = false; 1467 } 1468 1469 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1470 1471 do { 1472 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1473 reg &= DWC3_DSTS_DEVCTRLHLT; 1474 } while (--timeout && !(!is_on ^ !reg)); 1475 1476 if (!timeout) 1477 return -ETIMEDOUT; 1478 1479 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1480 dwc->gadget_driver 1481 ? dwc->gadget_driver->function : "no-function", 1482 is_on ? 
"connect" : "disconnect"); 1483 1484 return 0; 1485 } 1486 1487 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1488 { 1489 struct dwc3 *dwc = gadget_to_dwc(g); 1490 unsigned long flags; 1491 int ret; 1492 1493 is_on = !!is_on; 1494 1495 spin_lock_irqsave(&dwc->lock, flags); 1496 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1497 spin_unlock_irqrestore(&dwc->lock, flags); 1498 1499 return ret; 1500 } 1501 1502 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1503 { 1504 u32 reg; 1505 1506 /* Enable all but Start and End of Frame IRQs */ 1507 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1508 DWC3_DEVTEN_EVNTOVERFLOWEN | 1509 DWC3_DEVTEN_CMDCMPLTEN | 1510 DWC3_DEVTEN_ERRTICERREN | 1511 DWC3_DEVTEN_WKUPEVTEN | 1512 DWC3_DEVTEN_ULSTCNGEN | 1513 DWC3_DEVTEN_CONNECTDONEEN | 1514 DWC3_DEVTEN_USBRSTEN | 1515 DWC3_DEVTEN_DISCONNEVTEN); 1516 1517 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1518 } 1519 1520 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1521 { 1522 /* mask all interrupts */ 1523 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1524 } 1525 1526 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1527 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1528 1529 /** 1530 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG 1531 * dwc: pointer to our context structure 1532 * 1533 * The following looks like complex but it's actually very simple. In order to 1534 * calculate the number of packets we can burst at once on OUT transfers, we're 1535 * gonna use RxFIFO size. 1536 * 1537 * To calculate RxFIFO size we need two numbers: 1538 * MDWIDTH = size, in bits, of the internal memory bus 1539 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) 1540 * 1541 * Given these two numbers, the formula is simple: 1542 * 1543 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; 1544 * 1545 * 24 bytes is for 3x SETUP packets 1546 * 16 bytes is a clock domain crossing tolerance 1547 * 1548 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; 1549 */ 1550 static void dwc3_gadget_setup_nump(struct dwc3 *dwc) 1551 { 1552 u32 ram2_depth; 1553 u32 mdwidth; 1554 u32 nump; 1555 u32 reg; 1556 1557 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); 1558 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); 1559 1560 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; 1561 nump = min_t(u32, nump, 16); 1562 1563 /* update NumP */ 1564 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1565 reg &= ~DWC3_DCFG_NUMP_MASK; 1566 reg |= nump << DWC3_DCFG_NUMP_SHIFT; 1567 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1568 } 1569 1570 static int __dwc3_gadget_start(struct dwc3 *dwc) 1571 { 1572 struct dwc3_ep *dep; 1573 int ret = 0; 1574 u32 reg; 1575 1576 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1577 reg &= ~(DWC3_DCFG_SPEED_MASK); 1578 1579 /** 1580 * WORKAROUND: DWC3 revision < 2.20a have an issue 1581 * which would cause metastability state on Run/Stop 1582 * bit if we try to force the IP to USB2-only mode. 
1583 * 1584 * Because of that, we cannot configure the IP to any 1585 * speed other than the SuperSpeed 1586 * 1587 * Refers to: 1588 * 1589 * STAR#9000525659: Clock Domain Crossing on DCTL in 1590 * USB 2.0 Mode 1591 */ 1592 if (dwc->revision < DWC3_REVISION_220A) { 1593 reg |= DWC3_DCFG_SUPERSPEED; 1594 } else { 1595 switch (dwc->maximum_speed) { 1596 case USB_SPEED_LOW: 1597 reg |= DWC3_DCFG_LOWSPEED; 1598 break; 1599 case USB_SPEED_FULL: 1600 reg |= DWC3_DCFG_FULLSPEED1; 1601 break; 1602 case USB_SPEED_HIGH: 1603 reg |= DWC3_DCFG_HIGHSPEED; 1604 break; 1605 case USB_SPEED_SUPER_PLUS: 1606 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 1607 break; 1608 default: 1609 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", 1610 dwc->maximum_speed); 1611 /* fall through */ 1612 case USB_SPEED_SUPER: 1613 reg |= DWC3_DCFG_SUPERSPEED; 1614 break; 1615 } 1616 } 1617 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1618 1619 /* 1620 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1621 * field instead of letting dwc3 itself calculate that automatically. 1622 * 1623 * This way, we maximize the chances that we'll be able to get several 1624 * bursts of data without going through any sort of endpoint throttling. 1625 */ 1626 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1627 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1628 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1629 1630 dwc3_gadget_setup_nump(dwc); 1631 1632 /* Start with SuperSpeed Default */ 1633 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1634 1635 dep = dwc->eps[0]; 1636 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1637 false); 1638 if (ret) { 1639 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1640 goto err0; 1641 } 1642 1643 dep = dwc->eps[1]; 1644 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1645 false); 1646 if (ret) { 1647 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1648 goto err1; 1649 } 1650 1651 /* begin to receive SETUP packets */ 1652 dwc->ep0state = EP0_SETUP_PHASE; 1653 dwc3_ep0_out_start(dwc); 1654 1655 dwc3_gadget_enable_irq(dwc); 1656 1657 return 0; 1658 1659 err1: 1660 __dwc3_gadget_ep_disable(dwc->eps[0]); 1661 1662 err0: 1663 return ret; 1664 } 1665 1666 static int dwc3_gadget_start(struct usb_gadget *g, 1667 struct usb_gadget_driver *driver) 1668 { 1669 struct dwc3 *dwc = gadget_to_dwc(g); 1670 unsigned long flags; 1671 int ret = 0; 1672 int irq; 1673 1674 irq = dwc->irq_gadget; 1675 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1676 IRQF_SHARED, "dwc3", dwc->ev_buf); 1677 if (ret) { 1678 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1679 irq, ret); 1680 goto err0; 1681 } 1682 1683 spin_lock_irqsave(&dwc->lock, flags); 1684 if (dwc->gadget_driver) { 1685 dev_err(dwc->dev, "%s is already bound to %s\n", 1686 dwc->gadget.name, 1687 dwc->gadget_driver->driver.name); 1688 ret = -EBUSY; 1689 goto err1; 1690 } 1691 1692 dwc->gadget_driver = driver; 1693 1694 if (pm_runtime_active(dwc->dev)) 1695 __dwc3_gadget_start(dwc); 1696 1697 spin_unlock_irqrestore(&dwc->lock, flags); 1698 1699 return 0; 1700 1701 err1: 1702 spin_unlock_irqrestore(&dwc->lock, flags); 1703 free_irq(irq, dwc); 1704 1705 err0: 1706 return ret; 1707 } 1708 1709 static void __dwc3_gadget_stop(struct dwc3 *dwc) 1710 { 1711 if (pm_runtime_suspended(dwc->dev)) 1712 return; 1713 1714 dwc3_gadget_disable_irq(dwc); 1715 __dwc3_gadget_ep_disable(dwc->eps[0]); 1716 __dwc3_gadget_ep_disable(dwc->eps[1]); 1717 } 1718 1719 static int dwc3_gadget_stop(struct 
usb_gadget *g) 1720 { 1721 struct dwc3 *dwc = gadget_to_dwc(g); 1722 unsigned long flags; 1723 1724 spin_lock_irqsave(&dwc->lock, flags); 1725 __dwc3_gadget_stop(dwc); 1726 dwc->gadget_driver = NULL; 1727 spin_unlock_irqrestore(&dwc->lock, flags); 1728 1729 free_irq(dwc->irq_gadget, dwc->ev_buf); 1730 1731 return 0; 1732 } 1733 1734 static const struct usb_gadget_ops dwc3_gadget_ops = { 1735 .get_frame = dwc3_gadget_get_frame, 1736 .wakeup = dwc3_gadget_wakeup, 1737 .set_selfpowered = dwc3_gadget_set_selfpowered, 1738 .pullup = dwc3_gadget_pullup, 1739 .udc_start = dwc3_gadget_start, 1740 .udc_stop = dwc3_gadget_stop, 1741 }; 1742 1743 /* -------------------------------------------------------------------------- */ 1744 1745 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1746 u8 num, u32 direction) 1747 { 1748 struct dwc3_ep *dep; 1749 u8 i; 1750 1751 for (i = 0; i < num; i++) { 1752 u8 epnum = (i << 1) | (direction ? 1 : 0); 1753 1754 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1755 if (!dep) 1756 return -ENOMEM; 1757 1758 dep->dwc = dwc; 1759 dep->number = epnum; 1760 dep->direction = !!direction; 1761 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 1762 dwc->eps[epnum] = dep; 1763 1764 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1765 (epnum & 1) ? "in" : "out"); 1766 1767 dep->endpoint.name = dep->name; 1768 spin_lock_init(&dep->lock); 1769 1770 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1771 1772 if (epnum == 0 || epnum == 1) { 1773 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1774 dep->endpoint.maxburst = 1; 1775 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1776 if (!epnum) 1777 dwc->gadget.ep0 = &dep->endpoint; 1778 } else { 1779 int ret; 1780 1781 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1782 dep->endpoint.max_streams = 15; 1783 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1784 list_add_tail(&dep->endpoint.ep_list, 1785 &dwc->gadget.ep_list); 1786 1787 ret = dwc3_alloc_trb_pool(dep); 1788 if (ret) 1789 return ret; 1790 } 1791 1792 if (epnum == 0 || epnum == 1) { 1793 dep->endpoint.caps.type_control = true; 1794 } else { 1795 dep->endpoint.caps.type_iso = true; 1796 dep->endpoint.caps.type_bulk = true; 1797 dep->endpoint.caps.type_int = true; 1798 } 1799 1800 dep->endpoint.caps.dir_in = !!direction; 1801 dep->endpoint.caps.dir_out = !direction; 1802 1803 INIT_LIST_HEAD(&dep->pending_list); 1804 INIT_LIST_HEAD(&dep->started_list); 1805 } 1806 1807 return 0; 1808 } 1809 1810 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1811 { 1812 int ret; 1813 1814 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1815 1816 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1817 if (ret < 0) { 1818 dwc3_trace(trace_dwc3_gadget, 1819 "failed to allocate OUT endpoints"); 1820 return ret; 1821 } 1822 1823 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1824 if (ret < 0) { 1825 dwc3_trace(trace_dwc3_gadget, 1826 "failed to allocate IN endpoints"); 1827 return ret; 1828 } 1829 1830 return 0; 1831 } 1832 1833 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1834 { 1835 struct dwc3_ep *dep; 1836 u8 epnum; 1837 1838 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1839 dep = dwc->eps[epnum]; 1840 if (!dep) 1841 continue; 1842 /* 1843 * Physical endpoints 0 and 1 are special; they form the 1844 * bi-directional USB endpoint 0. 1845 * 1846 * For those two physical endpoints, we don't allocate a TRB 1847 * pool nor do we add them the endpoints list. 
Due to that, we
		 * shouldn't do these two operations otherwise we would end up
		 * with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status,
		int chain)
{
	unsigned int count;
	unsigned int s_pkt = 0;
	unsigned int trb_status;

	dwc3_ep_inc_deq(dep);

	if (req->trb == trb)
		dep->queued_requests--;

	trace_dwc3_complete_trb(dep, trb);

	/*
	 * If we're in the middle of a series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * one where the CHN bit is zero). DWC3 will also avoid clearing
	 * the HWO bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->request.actual += count;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dwc3_trace(trace_dwc3_gadget,
						"%s: incomplete IN transfer",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in pending_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the pending_list.
1918 */ 1919 dep->flags |= DWC3_EP_MISSED_ISOC; 1920 } else { 1921 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1922 dep->name); 1923 status = -ECONNRESET; 1924 } 1925 } else { 1926 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1927 } 1928 } else { 1929 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1930 s_pkt = 1; 1931 } 1932 1933 if (s_pkt && !chain) 1934 return 1; 1935 1936 if ((event->status & DEPEVT_STATUS_IOC) && 1937 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1938 return 1; 1939 1940 return 0; 1941 } 1942 1943 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1944 const struct dwc3_event_depevt *event, int status) 1945 { 1946 struct dwc3_request *req, *n; 1947 struct dwc3_trb *trb; 1948 bool ioc = false; 1949 int ret; 1950 1951 list_for_each_entry_safe(req, n, &dep->started_list, list) { 1952 unsigned length; 1953 unsigned actual; 1954 int chain; 1955 1956 length = req->request.length; 1957 chain = req->num_pending_sgs > 0; 1958 if (chain) { 1959 struct scatterlist *sg = req->sg; 1960 struct scatterlist *s; 1961 unsigned int pending = req->num_pending_sgs; 1962 unsigned int i; 1963 1964 for_each_sg(sg, s, pending, i) { 1965 trb = &dep->trb_pool[dep->trb_dequeue]; 1966 1967 req->sg = sg_next(s); 1968 req->num_pending_sgs--; 1969 1970 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1971 event, status, chain); 1972 if (ret) 1973 break; 1974 } 1975 } else { 1976 trb = &dep->trb_pool[dep->trb_dequeue]; 1977 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1978 event, status, chain); 1979 } 1980 1981 /* 1982 * We assume here we will always receive the entire data block 1983 * which we should receive. Meaning, if we program RX to 1984 * receive 4K but we receive only 2K, we assume that's all we 1985 * should receive and we simply bounce the request back to the 1986 * gadget driver for further processing. 1987 */ 1988 actual = length - req->request.actual; 1989 req->request.actual = actual; 1990 1991 if (ret && chain && (actual < length) && req->num_pending_sgs) 1992 return __dwc3_gadget_kick_transfer(dep, 0); 1993 1994 dwc3_gadget_giveback(dep, req, status); 1995 1996 if (ret) { 1997 if ((event->status & DEPEVT_STATUS_IOC) && 1998 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1999 ioc = true; 2000 break; 2001 } 2002 } 2003 2004 /* 2005 * Our endpoint might get disabled by another thread during 2006 * dwc3_gadget_giveback(). If that happens, we're just gonna return 1 2007 * early on so DWC3_EP_BUSY flag gets cleared 2008 */ 2009 if (!dep->endpoint.desc) 2010 return 1; 2011 2012 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 2013 list_empty(&dep->started_list)) { 2014 if (list_empty(&dep->pending_list)) { 2015 /* 2016 * If there is no entry in request list then do 2017 * not issue END TRANSFER now. Just set PENDING 2018 * flag, so that END TRANSFER is issued when an 2019 * entry is added into request list. 
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	unsigned status = 0;
	int clean_busy;
	u32 is_xfer_complete;

	is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
				usb_endpoint_xfer_isoc(dep->endpoint.desc)))
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32	reg;
		int	i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->started_list))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}

	/*
	 * Our endpoint might get disabled by another thread during
	 * dwc3_gadget_giveback(). If that happens, just return early since
	 * there is nothing more for us to do here.
	 */
	if (!dep->endpoint.desc)
		return;

	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		int ret;

		ret = __dwc3_gadget_kick_transfer(dep, 0);
		if (!ret || ret == -EBUSY)
			return;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_trace(trace_dwc3_gadget,
					"%s is an Isochronous endpoint",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int active;
			int ret;

			active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;

			dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
					dep->name, active ? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0);
			if (!ret || ret == -EBUSY)
				return;

			dwc3_trace(trace_dwc3_gadget,
					"%s: failed to kick transfers",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dwc3_trace(trace_dwc3_gadget,
					"Stream %d found and started",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dwc3_trace(trace_dwc3_gadget,
					"unable to find suitable stream");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	if (!dep->resource_index)
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is:
	 *
	 * - Issue EndTransfer WITH CMDIOC bit set
	 * - Wait 100us
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing an EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function and we don't need to delay for
	 * 100us.
	 *
	 * This mode is NOT available on the DWC_usb31 IP.
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;

	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
		udelay(100);
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

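/*
 * Reprogram GCTL.RAMClkSel on Connect Done: the field is cleared by a USB
 * reset, and the clock selection is only changed for SuperSpeed (or
 * faster) connections.
 */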
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if ((speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED2:
	case DWC3_DSTS_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DSTS_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * On dwc3 revisions >= 2.40a, when the LPM Erratum is
		 * enabled and DCFG.LPMCap is set, the core responds with
		 * an ACK if the BESL value in the LPM token is less than
		 * or equal to the LPM NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dwc3_trace(trace_dwc3_gadget,
					"ignoring transition U3 -> Resume");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
		dwc3_suspend_gadget(dwc);

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

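/*
 * Top-level dispatcher for device-specific (DEVT) events pulled out of the
 * event buffer; endpoint events are routed separately through
 * dwc3_endpoint_interrupt().
 */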
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		/* It changed to be suspend event for version 2.30a and above */
		if (dwc->revision < DWC3_REVISION_230A) {
			dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
		} else {
			dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");

			/*
			 * Ignore suspend event until the gadget enters into
			 * USB_STATE_CONFIGURED state.
			 */
			if (dwc->gadget.state >= USB_STATE_CONFIGURED)
				dwc3_gadget_suspend_interrupt(dwc,
						event->event_info);
		}
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dwc3_trace(trace_dwc3_gadget, "Command Complete");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dwc3_trace(trace_dwc3_gadget, "Overflow");
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which is 12 bytes: a regular entry followed by
		 * 8 bytes of data. At the moment it is not clear how
		 * such an entry is laid out when it lands next to the
		 * buffer boundary, so worry about that once we try to
		 * handle it.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	return ret;
}

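/*
 * Threaded interrupt handler: the hard IRQ handler (dwc3_interrupt() ->
 * dwc3_check_event_buf()) latches the event count and masks the event
 * interrupt, then this thread drains the event buffer under the
 * controller lock and unmasks the interrupt again.
 */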
static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;
	struct dwc3 *dwc = evt->dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_process_event_buf(evt);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 count;
	u32 reg;

	if (pm_runtime_suspended(dwc->dev)) {
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		dwc->pending_events = true;
		return IRQ_HANDLED;
	}

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;

	return dwc3_check_event_buf(evt);
}

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
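 *
 * Looks up the peripheral IRQ, allocates the control request, EP0 TRBs and
 * the EP0 bounce and ZLP buffers, initializes the endpoints and registers
 * the UDC with the gadget framework.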
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret, irq;
	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);

	irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
	if (irq == -EPROBE_DEFER)
		return irq;

	if (irq <= 0) {
		irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
		if (irq == -EPROBE_DEFER)
			return irq;

		if (irq <= 0) {
			irq = platform_get_irq(dwc3_pdev, 0);
			if (irq <= 0) {
				if (irq != -EPROBE_DEFER) {
					dev_err(dwc->dev,
						"missing peripheral IRQ\n");
				}
				if (!irq)
					irq = -EINVAL;
				return irq;
			}
		}
	}

	dwc->irq_gadget = irq;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!dwc->zlp_buf) {
		ret = -ENOMEM;
		goto err4;
	}

	dwc->gadget.ops			= &dwc3_gadget_ops;
	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported	= true;
	dwc->gadget.name		= "dwc3-gadget";
	dwc->gadget.is_otg		= dwc->dr_mode == USB_DR_MODE_OTG;

	/*
	 * FIXME We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which tells us we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing
	 * this to happen so we avoid sending the SuperSpeed Capability
	 * descriptor together with our BOS descriptor as that could confuse
	 * the host into thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (dwc->revision < DWC3_REVISION_220A)
		dwc3_trace(trace_dwc3_gadget,
				"Changing max_speed on rev %08x",
				dwc->revision);

	dwc->gadget.max_speed = dwc->maximum_speed;

	/*
	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
	 * on ep out.
	 */
	dwc->gadget.quirk_ep_out_aligned_size = true;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err5;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err5;
	}

	return 0;

err5:
	kfree(dwc->zlp_buf);

err4:
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

err3:
	kfree(dwc->setup_buf);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);
	kfree(dwc->zlp_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	int ret;

	if (!dwc->gadget_driver)
		return 0;

	ret = dwc3_gadget_run_stop(dwc, false, false);
	if (ret < 0)
		return ret;

	dwc3_disconnect_gadget(dwc);
	__dwc3_gadget_stop(dwc);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	int ret;

	if (!dwc->gadget_driver)
		return 0;

	ret = __dwc3_gadget_start(dwc);
	if (ret < 0)
		goto err0;

	ret = dwc3_gadget_run_stop(dwc, true, false);
	if (ret < 0)
		goto err1;

	return 0;

err1:
	__dwc3_gadget_stop(dwc);

err0:
	return ret;
}

void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
	if (dwc->pending_events) {
		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
		dwc->pending_events = false;
		enable_irq(dwc->irq_gadget);
	}
}