// SPDX-License-Identifier: GPL-2.0
/*
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/*
 * Align (frame_number + n * interval) down to an interval boundary.
 * NOTE(review): assumes (d)->interval is a power of two — the mask
 * ~(interval - 1) only clears low bits correctly in that case.
 */
#define DWC3_ALIGN_FRAME(d, n)	(((d)->frame_number + ((d)->interval * (n))) \
					& ~((d)->interval - 1))

/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		/* TstCtrl field sits at DCTL bits [4:1], hence the shift */
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - gets current state of usb link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function returns the
 * current link state (always >= 0; it cannot fail).
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	/* USBLNKST is a read-only field of DSTS */
	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		/* loop fell through without DCNRD clearing */
		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set no action before sending new link state change */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it is point to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	/* slot DWC3_TRB_NUM-1 holds the link TRB; skip it by wrapping */
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

/*
 * Remove @req from its list, finalize its status, unmap its DMA buffer
 * and drop the PM reference taken when it was queued (non-ep0 only).
 * Does NOT call the request's ->complete() callback; see
 * dwc3_gadget_giveback() for that.
 */
static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
		struct dwc3_request *req, int status)
{
	struct dwc3 *dwc = dep->dwc;

	list_del(&req->list);
	req->remaining = 0;
	req->needs_extra_trb = false;

	/* only overwrite status if nobody set one yet */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
				&req->request, req->direction);

	req->trb = NULL;
	trace_dwc3_gadget_giveback(req);

	/* ep0 (numbers 0/1) does not hold a runtime-PM reference */
	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	dwc3_gadget_del_and_unmap_request(dep, req, status);
	req->status = DWC3_REQUEST_STATUS_COMPLETED;

	/*
	 * Drop the lock around the upper-layer callback: ->complete() may
	 * re-enter the driver (e.g. queue another request).
	 */
	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
		u32 param)
{
	u32 timeout = 500;
	int status = 0;
	int ret = 0;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	/* busy-poll until the controller clears CMDACT */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
271 */ 272 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, 273 struct dwc3_gadget_ep_cmd_params *params) 274 { 275 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 276 struct dwc3 *dwc = dep->dwc; 277 u32 timeout = 5000; 278 u32 saved_config = 0; 279 u32 reg; 280 281 int cmd_status = 0; 282 int ret = -EINVAL; 283 284 /* 285 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or 286 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an 287 * endpoint command. 288 * 289 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY 290 * settings. Restore them after the command is completed. 291 * 292 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 293 */ 294 if (dwc->gadget->speed <= USB_SPEED_HIGH) { 295 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 296 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 297 saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; 298 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 299 } 300 301 if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) { 302 saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM; 303 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 304 } 305 306 if (saved_config) 307 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 308 } 309 310 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 311 int link_state; 312 313 /* 314 * Initiate remote wakeup if the link state is in U3 when 315 * operating in SS/SSP or L1/L2 when operating in HS/FS. If the 316 * link state is in U1/U2, no remote wakeup is needed. The Start 317 * Transfer command will initiate the link recovery. 
318 */ 319 link_state = dwc3_gadget_get_link_state(dwc); 320 switch (link_state) { 321 case DWC3_LINK_STATE_U2: 322 if (dwc->gadget->speed >= USB_SPEED_SUPER) 323 break; 324 325 fallthrough; 326 case DWC3_LINK_STATE_U3: 327 ret = __dwc3_gadget_wakeup(dwc); 328 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 329 ret); 330 break; 331 } 332 } 333 334 /* 335 * For some commands such as Update Transfer command, DEPCMDPARn 336 * registers are reserved. Since the driver often sends Update Transfer 337 * command, don't write to DEPCMDPARn to avoid register write delays and 338 * improve performance. 339 */ 340 if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) { 341 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 342 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 343 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 344 } 345 346 /* 347 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're 348 * not relying on XferNotReady, we can make use of a special "No 349 * Response Update Transfer" command where we should clear both CmdAct 350 * and CmdIOC bits. 351 * 352 * With this, we don't need to wait for command completion and can 353 * straight away issue further commands to the endpoint. 354 * 355 * NOTICE: We're making an assumption that control endpoints will never 356 * make use of Update Transfer command. This is a safe assumption 357 * because we can never have more than one request at a time with 358 * Control Endpoints. If anybody changes that assumption, this chunk 359 * needs to be updated accordingly. 
360 */ 361 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && 362 !usb_endpoint_xfer_isoc(desc)) 363 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); 364 else 365 cmd |= DWC3_DEPCMD_CMDACT; 366 367 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); 368 369 if (!(cmd & DWC3_DEPCMD_CMDACT) || 370 (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER && 371 !(cmd & DWC3_DEPCMD_CMDIOC))) { 372 ret = 0; 373 goto skip_status; 374 } 375 376 do { 377 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 378 if (!(reg & DWC3_DEPCMD_CMDACT)) { 379 cmd_status = DWC3_DEPCMD_STATUS(reg); 380 381 switch (cmd_status) { 382 case 0: 383 ret = 0; 384 break; 385 case DEPEVT_TRANSFER_NO_RESOURCE: 386 dev_WARN(dwc->dev, "No resource for %s\n", 387 dep->name); 388 ret = -EINVAL; 389 break; 390 case DEPEVT_TRANSFER_BUS_EXPIRY: 391 /* 392 * SW issues START TRANSFER command to 393 * isochronous ep with future frame interval. If 394 * future interval time has already passed when 395 * core receives the command, it will respond 396 * with an error status of 'Bus Expiry'. 397 * 398 * Instead of always returning -EINVAL, let's 399 * give a hint to the gadget driver that this is 400 * the case by returning -EAGAIN. 
401 */ 402 ret = -EAGAIN; 403 break; 404 default: 405 dev_WARN(dwc->dev, "UNKNOWN cmd status\n"); 406 } 407 408 break; 409 } 410 } while (--timeout); 411 412 if (timeout == 0) { 413 ret = -ETIMEDOUT; 414 cmd_status = -ETIMEDOUT; 415 } 416 417 skip_status: 418 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); 419 420 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 421 if (ret == 0) 422 dep->flags |= DWC3_EP_TRANSFER_STARTED; 423 424 if (ret != -ETIMEDOUT) 425 dwc3_gadget_ep_get_transfer_index(dep); 426 } 427 428 if (saved_config) { 429 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 430 reg |= saved_config; 431 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 432 } 433 434 return ret; 435 } 436 437 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) 438 { 439 struct dwc3 *dwc = dep->dwc; 440 struct dwc3_gadget_ep_cmd_params params; 441 u32 cmd = DWC3_DEPCMD_CLEARSTALL; 442 443 /* 444 * As of core revision 2.60a the recommended programming model 445 * is to set the ClearPendIN bit when issuing a Clear Stall EP 446 * command for IN endpoints. This is to prevent an issue where 447 * some (non-compliant) hosts may not send ACK TPs for pending 448 * IN transfers due to a mishandled error condition. Synopsys 449 * STAR 9000614252. 
450 */ 451 if (dep->direction && 452 !DWC3_VER_IS_PRIOR(DWC3, 260A) && 453 (dwc->gadget->speed >= USB_SPEED_SUPER)) 454 cmd |= DWC3_DEPCMD_CLEARPENDIN; 455 456 memset(¶ms, 0, sizeof(params)); 457 458 return dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 459 } 460 461 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 462 struct dwc3_trb *trb) 463 { 464 u32 offset = (char *) trb - (char *) dep->trb_pool; 465 466 return dep->trb_pool_dma + offset; 467 } 468 469 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 470 { 471 struct dwc3 *dwc = dep->dwc; 472 473 if (dep->trb_pool) 474 return 0; 475 476 dep->trb_pool = dma_alloc_coherent(dwc->sysdev, 477 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 478 &dep->trb_pool_dma, GFP_KERNEL); 479 if (!dep->trb_pool) { 480 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 481 dep->name); 482 return -ENOMEM; 483 } 484 485 return 0; 486 } 487 488 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 489 { 490 struct dwc3 *dwc = dep->dwc; 491 492 dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 493 dep->trb_pool, dep->trb_pool_dma); 494 495 dep->trb_pool = NULL; 496 dep->trb_pool_dma = 0; 497 } 498 499 static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) 500 { 501 struct dwc3_gadget_ep_cmd_params params; 502 503 memset(¶ms, 0x00, sizeof(params)); 504 505 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 506 507 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 508 ¶ms); 509 } 510 511 /** 512 * dwc3_gadget_start_config - configure ep resources 513 * @dep: endpoint that is being enabled 514 * 515 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's 516 * completion, it will set Transfer Resource for all available endpoints. 517 * 518 * The assignment of transfer resources cannot perfectly follow the data book 519 * due to the fact that the controller driver does not have all knowledge of the 520 * configuration in advance. 
It is given this information piecemeal by the 521 * composite gadget framework after every SET_CONFIGURATION and 522 * SET_INTERFACE. Trying to follow the databook programming model in this 523 * scenario can cause errors. For two reasons: 524 * 525 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every 526 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is 527 * incorrect in the scenario of multiple interfaces. 528 * 529 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new 530 * endpoint on alt setting (8.1.6). 531 * 532 * The following simplified method is used instead: 533 * 534 * All hardware endpoints can be assigned a transfer resource and this setting 535 * will stay persistent until either a core reset or hibernation. So whenever we 536 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do 537 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are 538 * guaranteed that there are as many transfer resources as endpoints. 539 * 540 * This function is called for each endpoint when it is being enabled but is 541 * triggered only when called for EP0-out, which always happens first, and which 542 * should only happen in one of the above conditions. 
543 */ 544 static int dwc3_gadget_start_config(struct dwc3_ep *dep) 545 { 546 struct dwc3_gadget_ep_cmd_params params; 547 struct dwc3 *dwc; 548 u32 cmd; 549 int i; 550 int ret; 551 552 if (dep->number) 553 return 0; 554 555 memset(¶ms, 0x00, sizeof(params)); 556 cmd = DWC3_DEPCMD_DEPSTARTCFG; 557 dwc = dep->dwc; 558 559 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 560 if (ret) 561 return ret; 562 563 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 564 struct dwc3_ep *dep = dwc->eps[i]; 565 566 if (!dep) 567 continue; 568 569 ret = dwc3_gadget_set_xfer_resource(dep); 570 if (ret) 571 return ret; 572 } 573 574 return 0; 575 } 576 577 static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) 578 { 579 const struct usb_ss_ep_comp_descriptor *comp_desc; 580 const struct usb_endpoint_descriptor *desc; 581 struct dwc3_gadget_ep_cmd_params params; 582 struct dwc3 *dwc = dep->dwc; 583 584 comp_desc = dep->endpoint.comp_desc; 585 desc = dep->endpoint.desc; 586 587 memset(¶ms, 0x00, sizeof(params)); 588 589 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 590 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 591 592 /* Burst size is only needed in SuperSpeed mode */ 593 if (dwc->gadget->speed >= USB_SPEED_SUPER) { 594 u32 burst = dep->endpoint.maxburst; 595 596 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); 597 } 598 599 params.param0 |= action; 600 if (action == DWC3_DEPCFG_ACTION_RESTORE) 601 params.param2 |= dep->saved_state; 602 603 if (usb_endpoint_xfer_control(desc)) 604 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; 605 606 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc)) 607 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN; 608 609 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 610 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 611 | DWC3_DEPCFG_XFER_COMPLETE_EN 612 | DWC3_DEPCFG_STREAM_EVENT_EN; 613 dep->stream_capable = true; 614 } 615 616 if (!usb_endpoint_xfer_control(desc)) 617 params.param1 |= 
DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 618 619 /* 620 * We are doing 1:1 mapping for endpoints, meaning 621 * Physical Endpoints 2 maps to Logical Endpoint 2 and 622 * so on. We consider the direction bit as part of the physical 623 * endpoint number. So USB endpoint 0x81 is 0x03. 624 */ 625 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 626 627 /* 628 * We must use the lower 16 TX FIFOs even though 629 * HW might have more 630 */ 631 if (dep->direction) 632 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 633 634 if (desc->bInterval) { 635 u8 bInterval_m1; 636 637 /* 638 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13. 639 * 640 * NOTE: The programming guide incorrectly stated bInterval_m1 641 * must be set to 0 when operating in fullspeed. Internally the 642 * controller does not have this limitation. See DWC_usb3x 643 * programming guide section 3.2.2.1. 644 */ 645 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13); 646 647 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT && 648 dwc->gadget->speed == USB_SPEED_FULL) 649 dep->interval = desc->bInterval; 650 else 651 dep->interval = 1 << (desc->bInterval - 1); 652 653 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); 654 } 655 656 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); 657 } 658 659 /** 660 * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value 661 * @dwc: pointer to the DWC3 context 662 * @mult: multiplier to be used when calculating the fifo_size 663 * 664 * Calculates the size value based on the equation below: 665 * 666 * DWC3 revision 280A and prior: 667 * fifo_size = mult * (max_packet / mdwidth) + 1; 668 * 669 * DWC3 revision 290A and onwards: 670 * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1 671 * 672 * The max packet size is set to 1024, as the txfifo requirements mainly apply 673 * to super speed USB use cases. However, it is safe to overestimate the fifo 674 * allocations for other scenarios, i.e. high speed USB. 
 */
static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
{
	int max_packet = 1024;
	int fifo_size;
	int mdwidth;

	mdwidth = dwc3_mdwidth(dwc);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	if (DWC3_VER_IS_PRIOR(DWC3, 290A))
		fifo_size = mult * (max_packet / mdwidth) + 1;
	else
		fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
	return fifo_size;
}

/**
 * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
 * @dwc: pointer to the DWC3 context
 *
 * Iterates through all the endpoint registers and clears the previous txfifo
 * allocations.
 */
void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int fifo_depth;
	int size;
	int num;

	if (!dwc->do_fifo_resize)
		return;

	/* Read ep0IN related TXFIFO size */
	dep = dwc->eps[1];
	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
	if (DWC3_IP_IS(DWC3))
		fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
	else
		fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);

	dwc->last_fifo_depth = fifo_depth;
	/* Clear existing TXFIFO for all IN eps except ep0 */
	for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
	     num += 2) {
		/* odd ep numbers are the IN endpoints (1:1 phys mapping) */
		dep = dwc->eps[num];
		/* Don't change TXFRAMNUM on usb31 version */
		size = DWC3_IP_IS(DWC3) ? 0 :
			dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
				   DWC31_GTXFIFOSIZ_TXFRAMNUM;

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
		dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
	}
	dwc->num_ep_resized = 0;
}

/*
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dep: the endpoint being enabled (its dwc3 context is dep->dwc)
 *
 * This function will a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In general, FIFO depths are represented with the following equation:
 *
 * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
 *
 * In conjunction with dwc3_gadget_check_config(), this resizing logic will
 * ensure that all endpoints will have enough internal memory for one max
 * packet per endpoint.
 */
static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int fifo_0_start;
	int ram1_depth;
	int fifo_size;
	int min_depth;
	int num_in_ep;
	int remaining;
	int num_fifos = 1;
	int fifo;
	int tmp;

	if (!dwc->do_fifo_resize)
		return 0;

	/* resize IN endpoints except ep0 */
	if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
		return 0;

	/* bail if already resized */
	if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);

	/* bursting bulk and all isoc endpoints get triple buffering */
	if ((dep->endpoint.maxburst > 1 &&
	     usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
	    usb_endpoint_xfer_isoc(dep->endpoint.desc))
		num_fifos = 3;

	/* high-burst endpoints on DWC_usb31 may get even more */
	if (dep->endpoint.maxburst > 6 &&
	    (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
	     usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
		num_fifos = dwc->tx_fifo_resize_max_num;

	/* FIFO size for a single buffer */
	fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);

	/* Calculate the number of remaining EPs w/o any FIFO */
	num_in_ep = dwc->max_cfg_eps;
	num_in_ep -= dwc->num_ep_resized;

	/* Reserve at least one FIFO for the number of IN EPs */
	min_depth = num_in_ep * (fifo + 1);
	remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
	remaining = max_t(int, 0, remaining);
	/*
	 * We've already reserved 1 FIFO per EP, so check what we can fit in
	 * addition to it. If there is not enough remaining space, allocate
	 * all the remaining space to the EP.
	 */
	fifo_size = (num_fifos - 1) * fifo;
	if (remaining < fifo_size)
		fifo_size = remaining;

	fifo_size += fifo;
	/* Last increment according to the TX FIFO size equation */
	fifo_size++;

	/* Check if TXFIFOs start at non-zero addr */
	tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
	fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);

	/* pack FIFO start address (upper half) together with the depth */
	fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
	if (DWC3_IP_IS(DWC3))
		dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
	else
		dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);

	/* Check fifo size allocation doesn't exceed available RAM size. */
	if (dwc->last_fifo_depth >= ram1_depth) {
		dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
			dwc->last_fifo_depth, ram1_depth,
			dep->endpoint.name, fifo_size);
		/* roll back the accounting done above before failing */
		if (DWC3_IP_IS(DWC3))
			fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
		else
			fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);

		dwc->last_fifo_depth -= fifo_size;
		return -ENOMEM;
	}

	dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
	dep->flags |= DWC3_EP_TXFIFO_RESIZED;
	dwc->num_ep_resized++;

	return 0;
}

/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @action: one of INIT, MODIFY or RESTORE
 *
 * Caller should take care of locking. Execute all necessary commands to
 * initialize a HW endpoint so it can be used by a gadget driver.
854 */ 855 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) 856 { 857 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 858 struct dwc3 *dwc = dep->dwc; 859 860 u32 reg; 861 int ret; 862 863 if (!(dep->flags & DWC3_EP_ENABLED)) { 864 ret = dwc3_gadget_resize_tx_fifos(dep); 865 if (ret) 866 return ret; 867 868 ret = dwc3_gadget_start_config(dep); 869 if (ret) 870 return ret; 871 } 872 873 ret = dwc3_gadget_set_ep_config(dep, action); 874 if (ret) 875 return ret; 876 877 if (!(dep->flags & DWC3_EP_ENABLED)) { 878 struct dwc3_trb *trb_st_hw; 879 struct dwc3_trb *trb_link; 880 881 dep->type = usb_endpoint_type(desc); 882 dep->flags |= DWC3_EP_ENABLED; 883 884 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 885 reg |= DWC3_DALEPENA_EP(dep->number); 886 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 887 888 dep->trb_dequeue = 0; 889 dep->trb_enqueue = 0; 890 891 if (usb_endpoint_xfer_control(desc)) 892 goto out; 893 894 /* Initialize the TRB ring */ 895 memset(dep->trb_pool, 0, 896 sizeof(struct dwc3_trb) * DWC3_TRB_NUM); 897 898 /* Link TRB. The HWO bit is never reset */ 899 trb_st_hw = &dep->trb_pool[0]; 900 901 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 902 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 903 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 904 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 905 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 906 } 907 908 /* 909 * Issue StartTransfer here with no-op TRB so we can always rely on No 910 * Response Update Transfer command. 
911 */ 912 if (usb_endpoint_xfer_bulk(desc) || 913 usb_endpoint_xfer_int(desc)) { 914 struct dwc3_gadget_ep_cmd_params params; 915 struct dwc3_trb *trb; 916 dma_addr_t trb_dma; 917 u32 cmd; 918 919 memset(¶ms, 0, sizeof(params)); 920 trb = &dep->trb_pool[0]; 921 trb_dma = dwc3_trb_dma_offset(dep, trb); 922 923 params.param0 = upper_32_bits(trb_dma); 924 params.param1 = lower_32_bits(trb_dma); 925 926 cmd = DWC3_DEPCMD_STARTTRANSFER; 927 928 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 929 if (ret < 0) 930 return ret; 931 932 if (dep->stream_capable) { 933 /* 934 * For streams, at start, there maybe a race where the 935 * host primes the endpoint before the function driver 936 * queues a request to initiate a stream. In that case, 937 * the controller will not see the prime to generate the 938 * ERDY and start stream. To workaround this, issue a 939 * no-op TRB as normal, but end it immediately. As a 940 * result, when the function driver queues the request, 941 * the next START_TRANSFER command will cause the 942 * controller to generate an ERDY to initiate the 943 * stream. 944 */ 945 dwc3_stop_active_transfer(dep, true, true); 946 947 /* 948 * All stream eps will reinitiate stream on NoStream 949 * rejection until we can determine that the host can 950 * prime after the first transfer. 951 * 952 * However, if the controller is capable of 953 * TXF_FLUSH_BYPASS, then IN direction endpoints will 954 * automatically restart the stream without the driver 955 * initiation. 
956 */ 957 if (!dep->direction || 958 !(dwc->hwparams.hwparams9 & 959 DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS)) 960 dep->flags |= DWC3_EP_FORCE_RESTART_STREAM; 961 } 962 } 963 964 out: 965 trace_dwc3_gadget_ep_enable(dep); 966 967 return 0; 968 } 969 970 void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status) 971 { 972 struct dwc3_request *req; 973 974 dwc3_stop_active_transfer(dep, true, false); 975 976 /* If endxfer is delayed, avoid unmapping requests */ 977 if (dep->flags & DWC3_EP_DELAY_STOP) 978 return; 979 980 /* - giveback all requests to gadget driver */ 981 while (!list_empty(&dep->started_list)) { 982 req = next_request(&dep->started_list); 983 984 dwc3_gadget_giveback(dep, req, status); 985 } 986 987 while (!list_empty(&dep->pending_list)) { 988 req = next_request(&dep->pending_list); 989 990 dwc3_gadget_giveback(dep, req, status); 991 } 992 993 while (!list_empty(&dep->cancelled_list)) { 994 req = next_request(&dep->cancelled_list); 995 996 dwc3_gadget_giveback(dep, req, status); 997 } 998 } 999 1000 /** 1001 * __dwc3_gadget_ep_disable - disables a hw endpoint 1002 * @dep: the endpoint to disable 1003 * 1004 * This function undoes what __dwc3_gadget_ep_enable did and also removes 1005 * requests which are currently being processed by the hardware and those which 1006 * are not yet scheduled. 1007 * 1008 * Caller should take care of locking. 
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	u32 mask;

	trace_dwc3_gadget_ep_disable(dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	/* drop the endpoint from the active-endpoint bitmap */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	dwc3_remove_requests(dwc, dep, -ECONNRESET);

	dep->stream_capable = false;
	dep->type = 0;
	mask = DWC3_EP_TXFIFO_RESIZED;
	/*
	 * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
	 * set. Do not clear DEP flags, so that the end transfer command will
	 * be reattempted during the next SETUP stage.
	 */
	if (dep->flags & DWC3_EP_DELAY_STOP)
		mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
	dep->flags &= mask;

	return 0;
}

/* -------------------------------------------------------------------------- */

/* ep0 is managed by the core; gadget drivers may not (re)enable it */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

/* ep0 is managed by the core; gadget drivers may not disable it */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

/*
 * usb_ep_ops ->enable() entry point: validate arguments then enable the
 * endpoint under the controller lock.
 */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double-enable is a caller bug, but treated as success */
	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * usb_ep_ops ->disable() entry point: validate the endpoint then disable
 * it under the controller lock.
 */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double-disable is a caller bug, but treated as success */
	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops ->alloc_request(): allocate and initialize a dwc3_request */
static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->direction = dep->direction;
	req->epnum = dep->number;
	req->dep = dep;
	req->status = DWC3_REQUEST_STATUS_UNKNOWN;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

/* usb_ep_ops ->free_request(): counterpart to dwc3_gadget_ep_alloc_request */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_ep_prev_trb - returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by
the index. If the index is 0, we will wrap backwards, skip the link TRB,
 * and return the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	/* index 0 wraps to the last slot, which is the link TRB */
	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	/* step back one more so the link TRB itself is skipped */
	return &dep->trb_pool[tmp - 1];
}

/*
 * dwc3_calc_trbs_left - number of free TRB slots in @dep's ring.
 * Returns 0 when the ring is full; at most DWC3_TRB_NUM - 1 (one slot
 * is permanently reserved for the link TRB).
 */
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	u8 trbs_left;

	/*
	 * If the enqueue & dequeue are equal then the TRB ring is either full
	 * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
	 * pending to be processed by the driver.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		/*
		 * If there is any request remained in the started_list at
		 * this point, that means there is no TRB available.
		 */
		if (!list_empty(&dep->started_list))
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	/* account for the link TRB sitting between dequeue and enqueue */
	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @trb_length: buffer size of the TRB
 * @chain: should this TRB be chained to the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 * @use_bounce_buffer: set to use bounce buffer
 * @must_interrupt: set to interrupt on TRB completion
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned int trb_length,
		unsigned int chain, unsigned int node, bool use_bounce_buffer,
		bool must_interrupt)
{
	struct dwc3_trb		*trb;
	dma_addr_t		dma;
	unsigned int		stream_id = req->request.stream_id;
	unsigned int		short_not_ok = req->request.short_not_ok;
	unsigned int		no_interrupt = req->request.no_interrupt;
	unsigned int		is_last = req->request.is_last;
	struct dwc3		*dwc = dep->dwc;
	struct usb_gadget	*gadget = dwc->gadget;
	enum usb_device_speed	speed = gadget->speed;

	/* pick the DMA source: bounce buffer, current SG entry, or linear */
	if (use_bounce_buffer)
		dma = dep->dwc->bounce_addr;
	else if (req->request.num_sgs > 0)
		dma = sg_dma_address(req->start_sg);
	else
		dma = req->request.dma;

	trb = &dep->trb_pool[dep->trb_enqueue];

	/* first TRB of the request: record its slot and move it to started */
	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	req->num_trbs++;

	trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used. If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet. If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = 2;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (req->request.length <= (2 * maxp))
					mult--;

				if (req->request.length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/*
	 * Enable Continue on Short Packet
	 * when endpoint is not a stream capable
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		if (!dep->stream_capable)
			trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	/* All TRBs setup for MST must set CSP=1 when LST=0 */
	if (dep->stream_capable && DWC3_MST_CAPABLE(&dwc->hwparams))
		trb->ctrl |= DWC3_TRB_CTRL_CSP;

	if ((!no_interrupt && !chain) || must_interrupt)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else if (dep->stream_capable && is_last &&
		 !DWC3_MST_CAPABLE(&dwc->hwparams))
		trb->ctrl |= DWC3_TRB_CTRL_LST;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	/*
	 * As per data book 4.2.3.2TRB Control Bit Rules section
	 *
	 * The controller autonomously checks the HWO field of a TRB to determine if the
	 * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
	 * is valid before setting the HWO field to '1'. In most systems, this means that
	 * software must update the fourth DWORD of a TRB last.
	 *
	 * However there is a possibility of CPU re-ordering here which can cause
	 * controller to observe the HWO bit set prematurely.
	 * Add a write memory barrier to prevent CPU re-ordering.
	 */
	wmb();
	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);

	trace_dwc3_prepare_trb(dep, trb);
}

/*
 * dwc3_needs_extra_trb - does @req need a second TRB?
 * True when a ZLP was requested for an exactly-maxpacket-multiple transfer
 * (non-isoc), or when an OUT transfer needs padding to maxpacket alignment.
 */
static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
{
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = req->request.length % maxp;

	if ((req->request.length && req->request.zero && !rem &&
			!usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
			(!req->direction && rem))
		return true;

	return false;
}

/**
 * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
 * @dep: The endpoint that the request belongs to
 * @req: The request to prepare
 * @entry_length: The last SG entry size
 * @node: Indicates whether this is not the first entry (for isoc only)
 *
 * Return the number of TRBs prepared.
 */
static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned int entry_length,
		unsigned int node)
{
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = req->request.length % maxp;
	unsigned int num_trbs = 1;

	if (dwc3_needs_extra_trb(dep, req))
		num_trbs++;

	/* don't start at all unless every needed TRB fits in the ring */
	if (dwc3_calc_trbs_left(dep) < num_trbs)
		return 0;

	req->needs_extra_trb = num_trbs > 1;

	/* Prepare a normal TRB */
	if (req->direction || req->request.length)
		dwc3_prepare_one_trb(dep, req, entry_length,
				req->needs_extra_trb, node, false, false);

	/* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
	if ((!req->direction && !req->request.length) || req->needs_extra_trb)
		dwc3_prepare_one_trb(dep, req,
				req->direction ? 0 : maxp - rem,
				false, 1, true, false);

	return num_trbs;
}

/*
 * dwc3_prepare_trbs_sg - queue TRBs for a scatter-gather request.
 * Resumes from req->start_sg if the request was partially queued before.
 * Returns the number of TRBs prepared by this call.
 */
static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int		i;
	unsigned int length = req->request.length;
	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;
	unsigned int num_trbs = req->num_trbs;
	bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);

	/*
	 * If we resume preparing the request, then get the remaining length of
	 * the request and resume where we left off.
	 */
	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
		length -= sg_dma_len(s);

	for_each_sg(sg, s, remaining, i) {
		unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
		unsigned int trb_length;
		bool must_interrupt = false;
		bool last_sg = false;

		trb_length = min_t(unsigned int, length, sg_dma_len(s));

		length -= trb_length;

		/*
		 * IOMMU driver is coalescing the list of sgs which shares a
		 * page boundary into one and giving it to USB driver. With
		 * this the number of sgs mapped is not equal to the number of
		 * sgs passed. So mark the chain bit to false if it is the last
		 * mapped sg.
		 */
		if ((i == remaining - 1) || !length)
			last_sg = true;

		if (!num_trbs_left)
			break;

		if (last_sg) {
			if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
				break;
		} else {
			/*
			 * Look ahead to check if we have enough TRBs for the
			 * next SG entry. If not, set interrupt on this TRB to
			 * resume preparing the next SG entry when more TRBs are
			 * free.
			 */
			if (num_trbs_left == 1 || (needs_extra_trb &&
					num_trbs_left <= 2 &&
					sg_dma_len(sg_next(s)) >= length))
				must_interrupt = true;

			dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
					must_interrupt);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to next sg to be queued, so that
		 * we have free trbs we can continue queuing from where we
		 * previously stopped
		 */
		if (!last_sg)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;
		req->num_pending_sgs--;

		/*
		 * The number of pending SG entries may not correspond to the
		 * number of mapped SG entries. If all the data are queued, then
		 * don't include unused SG entries.
		 */
		if (length == 0) {
			req->num_pending_sgs = 0;
			break;
		}

		if (must_interrupt)
			break;
	}

	return req->num_trbs - num_trbs;
}

/* Linear (non-SG) request: the whole buffer is the one-and-only "SG entry" */
static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 *
 * Returns the number of TRBs prepared or negative errno.
 */
static int dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request	*req, *n;
	int			ret = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0) {
			ret = dwc3_prepare_trbs_sg(dep, req);
			if (!ret || req->num_pending_sgs)
				return ret;
		}

		if (!dwc3_calc_trbs_left(dep))
			return ret;

		/*
		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
		 * burst capability may try to read and use TRBs beyond the
		 * active transfer instead of stopping.
1550 */ 1551 if (dep->stream_capable && req->request.is_last && 1552 !DWC3_MST_CAPABLE(&dep->dwc->hwparams)) 1553 return ret; 1554 } 1555 1556 list_for_each_entry_safe(req, n, &dep->pending_list, list) { 1557 struct dwc3 *dwc = dep->dwc; 1558 1559 ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, 1560 dep->direction); 1561 if (ret) 1562 return ret; 1563 1564 req->sg = req->request.sg; 1565 req->start_sg = req->sg; 1566 req->num_queued_sgs = 0; 1567 req->num_pending_sgs = req->request.num_mapped_sgs; 1568 1569 if (req->num_pending_sgs > 0) { 1570 ret = dwc3_prepare_trbs_sg(dep, req); 1571 if (req->num_pending_sgs) 1572 return ret; 1573 } else { 1574 ret = dwc3_prepare_trbs_linear(dep, req); 1575 } 1576 1577 if (!ret || !dwc3_calc_trbs_left(dep)) 1578 return ret; 1579 1580 /* 1581 * Don't prepare beyond a transfer. In DWC_usb32, its transfer 1582 * burst capability may try to read and use TRBs beyond the 1583 * active transfer instead of stopping. 1584 */ 1585 if (dep->stream_capable && req->request.is_last && 1586 !DWC3_MST_CAPABLE(&dwc->hwparams)) 1587 return ret; 1588 } 1589 1590 return ret; 1591 } 1592 1593 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep); 1594 1595 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) 1596 { 1597 struct dwc3_gadget_ep_cmd_params params; 1598 struct dwc3_request *req; 1599 int starting; 1600 int ret; 1601 u32 cmd; 1602 1603 /* 1604 * Note that it's normal to have no new TRBs prepared (i.e. ret == 0). 1605 * This happens when we need to stop and restart a transfer such as in 1606 * the case of reinitiating a stream or retrying an isoc transfer. 1607 */ 1608 ret = dwc3_prepare_trbs(dep); 1609 if (ret < 0) 1610 return ret; 1611 1612 starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED); 1613 1614 /* 1615 * If there's no new TRB prepared and we don't need to restart a 1616 * transfer, there's no need to update the transfer. 
1617 */ 1618 if (!ret && !starting) 1619 return ret; 1620 1621 req = next_request(&dep->started_list); 1622 if (!req) { 1623 dep->flags |= DWC3_EP_PENDING_REQUEST; 1624 return 0; 1625 } 1626 1627 memset(¶ms, 0, sizeof(params)); 1628 1629 if (starting) { 1630 params.param0 = upper_32_bits(req->trb_dma); 1631 params.param1 = lower_32_bits(req->trb_dma); 1632 cmd = DWC3_DEPCMD_STARTTRANSFER; 1633 1634 if (dep->stream_capable) 1635 cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id); 1636 1637 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1638 cmd |= DWC3_DEPCMD_PARAM(dep->frame_number); 1639 } else { 1640 cmd = DWC3_DEPCMD_UPDATETRANSFER | 1641 DWC3_DEPCMD_PARAM(dep->resource_index); 1642 } 1643 1644 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1645 if (ret < 0) { 1646 struct dwc3_request *tmp; 1647 1648 if (ret == -EAGAIN) 1649 return ret; 1650 1651 dwc3_stop_active_transfer(dep, true, true); 1652 1653 list_for_each_entry_safe(req, tmp, &dep->started_list, list) 1654 dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED); 1655 1656 /* If ep isn't started, then there's no end transfer pending */ 1657 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 1658 dwc3_gadget_ep_cleanup_cancelled_requests(dep); 1659 1660 return ret; 1661 } 1662 1663 if (dep->stream_capable && req->request.is_last && 1664 !DWC3_MST_CAPABLE(&dep->dwc->hwparams)) 1665 dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE; 1666 1667 return 0; 1668 } 1669 1670 static int __dwc3_gadget_get_frame(struct dwc3 *dwc) 1671 { 1672 u32 reg; 1673 1674 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1675 return DWC3_DSTS_SOFFN(reg); 1676 } 1677 1678 /** 1679 * __dwc3_stop_active_transfer - stop the current active transfer 1680 * @dep: isoc endpoint 1681 * @force: set forcerm bit in the command 1682 * @interrupt: command complete interrupt after End Transfer command 1683 * 1684 * When setting force, the ForceRM bit will be set. 
In that case 1685 * the controller won't update the TRB progress on command 1686 * completion. It also won't clear the HWO bit in the TRB. 1687 * The command will also not complete immediately in that case. 1688 */ 1689 static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt) 1690 { 1691 struct dwc3_gadget_ep_cmd_params params; 1692 u32 cmd; 1693 int ret; 1694 1695 cmd = DWC3_DEPCMD_ENDTRANSFER; 1696 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 1697 cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0; 1698 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 1699 memset(¶ms, 0, sizeof(params)); 1700 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1701 WARN_ON_ONCE(ret); 1702 dep->resource_index = 0; 1703 1704 if (!interrupt) 1705 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 1706 else if (!ret) 1707 dep->flags |= DWC3_EP_END_TRANSFER_PENDING; 1708 1709 return ret; 1710 } 1711 1712 /** 1713 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number 1714 * @dep: isoc endpoint 1715 * 1716 * This function tests for the correct combination of BIT[15:14] from the 16-bit 1717 * microframe number reported by the XferNotReady event for the future frame 1718 * number to start the isoc transfer. 1719 * 1720 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed 1721 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the 1722 * XferNotReady event are invalid. The driver uses this number to schedule the 1723 * isochronous transfer and passes it to the START TRANSFER command. Because 1724 * this number is invalid, the command may fail. If BIT[15:14] matches the 1725 * internal 16-bit microframe, the START TRANSFER command will pass and the 1726 * transfer will start at the scheduled time, if it is off by 1, the command 1727 * will still pass, but the transfer will start 2 seconds in the future. For all 1728 * other conditions, the START TRANSFER command will fail with bus-expiry. 
1729 * 1730 * In order to workaround this issue, we can test for the correct combination of 1731 * BIT[15:14] by sending START TRANSFER commands with different values of 1732 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart 1733 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status. 1734 * As the result, within the 4 possible combinations for BIT[15:14], there will 1735 * be 2 successful and 2 failure START COMMAND status. One of the 2 successful 1736 * command status will result in a 2-second delay start. The smaller BIT[15:14] 1737 * value is the correct combination. 1738 * 1739 * Since there are only 4 outcomes and the results are ordered, we can simply 1740 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to 1741 * deduce the smaller successful combination. 1742 * 1743 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01 1744 * of BIT[15:14]. The correct combination is as follow: 1745 * 1746 * if test0 fails and test1 passes, BIT[15:14] is 'b01 1747 * if test0 fails and test1 fails, BIT[15:14] is 'b10 1748 * if test0 passes and test1 fails, BIT[15:14] is 'b11 1749 * if test0 passes and test1 passes, BIT[15:14] is 'b00 1750 * 1751 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN 1752 * endpoints. 
1753 */ 1754 static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep) 1755 { 1756 int cmd_status = 0; 1757 bool test0; 1758 bool test1; 1759 1760 while (dep->combo_num < 2) { 1761 struct dwc3_gadget_ep_cmd_params params; 1762 u32 test_frame_number; 1763 u32 cmd; 1764 1765 /* 1766 * Check if we can start isoc transfer on the next interval or 1767 * 4 uframes in the future with BIT[15:14] as dep->combo_num 1768 */ 1769 test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK; 1770 test_frame_number |= dep->combo_num << 14; 1771 test_frame_number += max_t(u32, 4, dep->interval); 1772 1773 params.param0 = upper_32_bits(dep->dwc->bounce_addr); 1774 params.param1 = lower_32_bits(dep->dwc->bounce_addr); 1775 1776 cmd = DWC3_DEPCMD_STARTTRANSFER; 1777 cmd |= DWC3_DEPCMD_PARAM(test_frame_number); 1778 cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1779 1780 /* Redo if some other failure beside bus-expiry is received */ 1781 if (cmd_status && cmd_status != -EAGAIN) { 1782 dep->start_cmd_status = 0; 1783 dep->combo_num = 0; 1784 return 0; 1785 } 1786 1787 /* Store the first test status */ 1788 if (dep->combo_num == 0) 1789 dep->start_cmd_status = cmd_status; 1790 1791 dep->combo_num++; 1792 1793 /* 1794 * End the transfer if the START_TRANSFER command is successful 1795 * to wait for the next XferNotReady to test the command again 1796 */ 1797 if (cmd_status == 0) { 1798 dwc3_stop_active_transfer(dep, true, true); 1799 return 0; 1800 } 1801 } 1802 1803 /* test0 and test1 are both completed at this point */ 1804 test0 = (dep->start_cmd_status == 0); 1805 test1 = (cmd_status == 0); 1806 1807 if (!test0 && test1) 1808 dep->combo_num = 1; 1809 else if (!test0 && !test1) 1810 dep->combo_num = 2; 1811 else if (test0 && !test1) 1812 dep->combo_num = 3; 1813 else if (test0 && test1) 1814 dep->combo_num = 0; 1815 1816 dep->frame_number &= DWC3_FRNUMBER_MASK; 1817 dep->frame_number |= dep->combo_num << 14; 1818 dep->frame_number += max_t(u32, 4, dep->interval); 1819 1820 /* 
Reinitialize test variables */ 1821 dep->start_cmd_status = 0; 1822 dep->combo_num = 0; 1823 1824 return __dwc3_gadget_kick_transfer(dep); 1825 } 1826 1827 static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep) 1828 { 1829 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 1830 struct dwc3 *dwc = dep->dwc; 1831 int ret; 1832 int i; 1833 1834 if (list_empty(&dep->pending_list) && 1835 list_empty(&dep->started_list)) { 1836 dep->flags |= DWC3_EP_PENDING_REQUEST; 1837 return -EAGAIN; 1838 } 1839 1840 if (!dwc->dis_start_transfer_quirk && 1841 (DWC3_VER_IS_PRIOR(DWC31, 170A) || 1842 DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) { 1843 if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction) 1844 return dwc3_gadget_start_isoc_quirk(dep); 1845 } 1846 1847 if (desc->bInterval <= 14 && 1848 dwc->gadget->speed >= USB_SPEED_HIGH) { 1849 u32 frame = __dwc3_gadget_get_frame(dwc); 1850 bool rollover = frame < 1851 (dep->frame_number & DWC3_FRNUMBER_MASK); 1852 1853 /* 1854 * frame_number is set from XferNotReady and may be already 1855 * out of date. DSTS only provides the lower 14 bit of the 1856 * current frame number. So add the upper two bits of 1857 * frame_number and handle a possible rollover. 1858 * This will provide the correct frame_number unless more than 1859 * rollover has happened since XferNotReady. 
		 */

		dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
				    frame;
		if (rollover)
			dep->frame_number += BIT(14);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		int future_interval = i + 1;

		/* Give the controller at least 500us to schedule transfers */
		if (desc->bInterval < 3)
			future_interval += 3 - desc->bInterval;

		dep->frame_number = DWC3_ALIGN_FRAME(dep, future_interval);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	/*
	 * After a number of unsuccessful start attempts due to bus-expiry
	 * status, issue END_TRANSFER command and retry on the next XferNotReady
	 * event.
	 */
	if (ret == -EAGAIN)
		ret = __dwc3_stop_active_transfer(dep, false, true);

	return ret;
}

/*
 * __dwc3_gadget_ep_queue - queue @req on @dep and kick the transfer if
 * possible. Caller should take care of locking (dwc->lock held by the
 * dwc3_gadget_ep_queue wrapper).
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
				"%s: request %pK already in flight\n",
				dep->name, &req->request))
		return -EINVAL;

	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);
	req->status = DWC3_REQUEST_STATUS_QUEUED;

	if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
		return 0;

	/*
	 * Start the transfer only after the END_TRANSFER is completed
	 * and endpoint STALL is cleared.
	 */
	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
	    (dep->flags & DWC3_EP_WEDGE) ||
	    (dep->flags & DWC3_EP_DELAY_STOP) ||
	    (dep->flags & DWC3_EP_STALL)) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;
	}

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
			if ((dep->flags & DWC3_EP_PENDING_REQUEST))
				return __dwc3_gadget_start_isoc(dep);

			return 0;
		}
	}

	__dwc3_gadget_kick_transfer(dep);

	return 0;
}

/* usb_ep_ops->queue: take dwc->lock and queue the request */
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * dwc3_gadget_ep_skip_trbs - reclaim the TRBs of a cancelled request by
 * clearing HWO and advancing the dequeue pointer past them.
 */
static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
{
	int i;

	/* If req->trb is not set, then the request has not started */
	if (!req->trb)
		return;

	/*
	 * If request was already started, this means we had to
	 * stop the transfer. With that we also need to ignore
	 * all TRBs used by the request, however TRBs can only
	 * be modified after completion of END_TRANSFER
	 * command.
 So what we do here is that we wait for
	 * END_TRANSFER completion and only after that, we jump
	 * over TRBs by clearing HWO and incrementing dequeue
	 * pointer.
	 */
	for (i = 0; i < req->num_trbs; i++) {
		struct dwc3_trb *trb;

		trb = &dep->trb_pool[dep->trb_dequeue];
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		dwc3_ep_inc_deq(dep);
	}

	req->num_trbs = 0;
}

/*
 * dwc3_gadget_ep_cleanup_cancelled_requests - give back every request on the
 * cancelled list, mapping the recorded cancellation reason to the usual
 * usb_request status code (-ESHUTDOWN / -ECONNRESET / -EPIPE).
 */
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;

	while (!list_empty(&dep->cancelled_list)) {
		req = next_request(&dep->cancelled_list);
		dwc3_gadget_ep_skip_trbs(dep, req);
		switch (req->status) {
		case DWC3_REQUEST_STATUS_DISCONNECTED:
			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
			break;
		case DWC3_REQUEST_STATUS_DEQUEUED:
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			break;
		case DWC3_REQUEST_STATUS_STALLED:
			dwc3_gadget_giveback(dep, req, -EPIPE);
			break;
		default:
			dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			break;
		}
		/*
		 * The endpoint is disabled, let the dwc3_remove_requests()
		 * handle the cleanup.
		 */
		if (!dep->endpoint.desc)
			break;
	}
}

/*
 * usb_ep_ops->dequeue: cancel @request. A request still pending is given
 * back immediately; a started one requires stopping the active transfer
 * first and is given back once END_TRANSFER completes.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	/* already cancelled: nothing to do beyond reporting success */
	list_for_each_entry(r, &dep->cancelled_list, list) {
		if (r == req)
			goto out;
	}

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req) {
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			goto out;
		}
	}

	list_for_each_entry(r, &dep->started_list, list) {
		if (r == req) {
			struct dwc3_request *t;

			/* wait until it is processed */
			dwc3_stop_active_transfer(dep, true, true);

			/*
			 * Remove any started request if the transfer is
			 * cancelled.
2076 */ 2077 list_for_each_entry_safe(r, t, &dep->started_list, list) 2078 dwc3_gadget_move_cancelled_request(r, 2079 DWC3_REQUEST_STATUS_DEQUEUED); 2080 2081 dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE; 2082 2083 goto out; 2084 } 2085 } 2086 2087 dev_err(dwc->dev, "request %pK was not queued to %s\n", 2088 request, ep->name); 2089 ret = -EINVAL; 2090 out: 2091 spin_unlock_irqrestore(&dwc->lock, flags); 2092 2093 return ret; 2094 } 2095 2096 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 2097 { 2098 struct dwc3_gadget_ep_cmd_params params; 2099 struct dwc3 *dwc = dep->dwc; 2100 struct dwc3_request *req; 2101 struct dwc3_request *tmp; 2102 int ret; 2103 2104 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2105 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 2106 return -EINVAL; 2107 } 2108 2109 memset(¶ms, 0x00, sizeof(params)); 2110 2111 if (value) { 2112 struct dwc3_trb *trb; 2113 2114 unsigned int transfer_in_flight; 2115 unsigned int started; 2116 2117 if (dep->number > 1) 2118 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 2119 else 2120 trb = &dwc->ep0_trb[dep->trb_enqueue]; 2121 2122 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; 2123 started = !list_empty(&dep->started_list); 2124 2125 if (!protocol && ((dep->direction && transfer_in_flight) || 2126 (!dep->direction && started))) { 2127 return -EAGAIN; 2128 } 2129 2130 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, 2131 ¶ms); 2132 if (ret) 2133 dev_err(dwc->dev, "failed to set STALL on %s\n", 2134 dep->name); 2135 else 2136 dep->flags |= DWC3_EP_STALL; 2137 } else { 2138 /* 2139 * Don't issue CLEAR_STALL command to control endpoints. The 2140 * controller automatically clears the STALL when it receives 2141 * the SETUP token. 
2142 */ 2143 if (dep->number <= 1) { 2144 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 2145 return 0; 2146 } 2147 2148 dwc3_stop_active_transfer(dep, true, true); 2149 2150 list_for_each_entry_safe(req, tmp, &dep->started_list, list) 2151 dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_STALLED); 2152 2153 if (dep->flags & DWC3_EP_END_TRANSFER_PENDING || 2154 (dep->flags & DWC3_EP_DELAY_STOP)) { 2155 dep->flags |= DWC3_EP_PENDING_CLEAR_STALL; 2156 if (protocol) 2157 dwc->clear_stall_protocol = dep->number; 2158 2159 return 0; 2160 } 2161 2162 dwc3_gadget_ep_cleanup_cancelled_requests(dep); 2163 2164 ret = dwc3_send_clear_stall_ep_cmd(dep); 2165 if (ret) { 2166 dev_err(dwc->dev, "failed to clear STALL on %s\n", 2167 dep->name); 2168 return ret; 2169 } 2170 2171 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 2172 2173 if ((dep->flags & DWC3_EP_DELAY_START) && 2174 !usb_endpoint_xfer_isoc(dep->endpoint.desc)) 2175 __dwc3_gadget_kick_transfer(dep); 2176 2177 dep->flags &= ~DWC3_EP_DELAY_START; 2178 } 2179 2180 return ret; 2181 } 2182 2183 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2184 { 2185 struct dwc3_ep *dep = to_dwc3_ep(ep); 2186 struct dwc3 *dwc = dep->dwc; 2187 2188 unsigned long flags; 2189 2190 int ret; 2191 2192 spin_lock_irqsave(&dwc->lock, flags); 2193 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 2194 spin_unlock_irqrestore(&dwc->lock, flags); 2195 2196 return ret; 2197 } 2198 2199 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 2200 { 2201 struct dwc3_ep *dep = to_dwc3_ep(ep); 2202 struct dwc3 *dwc = dep->dwc; 2203 unsigned long flags; 2204 int ret; 2205 2206 spin_lock_irqsave(&dwc->lock, flags); 2207 dep->flags |= DWC3_EP_WEDGE; 2208 2209 if (dep->number == 0 || dep->number == 1) 2210 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 2211 else 2212 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 2213 spin_unlock_irqrestore(&dwc->lock, flags); 2214 2215 return ret; 2216 } 2217 2218 /* 
-------------------------------------------------------------------------- */

/*
 * Descriptor for the two physical endpoints forming ep0. wMaxPacketSize
 * is filled in at start time (see __dwc3_gadget_start()).
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

/* Endpoint operations for ep0 (control) */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* Endpoint operations for all non-control endpoints */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

/* usb_gadget_ops .get_frame: report the current (micro)frame number */
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	return __dwc3_gadget_get_frame(dwc);
}

/*
 * Issue a remote-wakeup request. Caller must hold dwc->lock (see
 * dwc3_gadget_wakeup()). Returns 0 on success, -EINVAL if the link is
 * not in a state that allows remote wakeup or never reaches U0.
 */
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int		retries;

	int		ret;
	u32		reg;

	u8		link_state;

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RESET:
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
	case DWC3_LINK_STATE_U1:
	case DWC3_LINK_STATE_RESUME:
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	/* busy-poll with no delay; bounded only by the retry count */
	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}

/* usb_gadget_ops .wakeup: locked wrapper around __dwc3_gadget_wakeup() */
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_wakeup(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_gadget_ops .set_selfpowered: record self-powered status on the gadget */
static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return
	0;
}

/* Cancel every outstanding request on all non-control endpoints (-ESHUTDOWN) */
static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	/* physical endpoints 0/1 (= USB ep0) are deliberately skipped */
	for (epnum = 2; epnum < dwc->num_eps; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
	}
}

/*
 * Program DCFG speed and lane count for a SuperSpeed Plus (gen/lane)
 * rate. Falls back to dwc->max_ssp_rate when no explicit rate was set.
 */
static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
{
	enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate;
	u32 reg;

	if (ssp_rate == USB_SSP_GEN_UNKNOWN)
		ssp_rate = dwc->max_ssp_rate;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_SPEED_MASK;
	reg &= ~DWC3_DCFG_NUMLANES(~0);

	if (ssp_rate == USB_SSP_GEN_1x2)
		reg |= DWC3_DCFG_SUPERSPEED;
	else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
		reg |= DWC3_DCFG_SUPERSPEED_PLUS;

	if (ssp_rate != USB_SSP_GEN_2x1 &&
	    dwc->max_ssp_rate != USB_SSP_GEN_2x1)
		reg |= DWC3_DCFG_NUMLANES(1);

	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

/*
 * Program the DCFG speed field from the configured gadget max speed,
 * clamped to the controller's maximum. SSP on DWC_usb32 is delegated to
 * __dwc3_gadget_set_ssp_rate().
 */
static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
{
	enum usb_device_speed speed;
	u32 reg;

	speed = dwc->gadget_max_speed;
	if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
		speed = dwc->maximum_speed;

	if (speed == USB_SPEED_SUPER_PLUS &&
	    DWC3_IP_IS(DWC32)) {
		__dwc3_gadget_set_ssp_rate(dwc);
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/*
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
	    !dwc->dis_metastability_quirk) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			reg |= DWC3_DCFG_FULLSPEED;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DCFG_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:
			reg |= DWC3_DCFG_SUPERSPEED;
			break;
		case USB_SPEED_SUPER_PLUS:
			if (DWC3_IP_IS(DWC3))
				reg |= DWC3_DCFG_SUPERSPEED;
			else
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
			break;
		default:
			dev_err(dwc->dev, "invalid speed (%d)\n", speed);

			/* fall back to the fastest the IP supports */
			if (DWC3_IP_IS(DWC3))
				reg |= DWC3_DCFG_SUPERSPEED;
			else
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
		}
	}

	/* single lane for any speed below SSP on DWC_usb32 */
	if (DWC3_IP_IS(DWC32) &&
	    speed > USB_SPEED_UNKNOWN &&
	    speed < USB_SPEED_SUPER_PLUS)
		reg &= ~DWC3_DCFG_NUMLANES(~0);

	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

/*
 * Set or clear DCTL.RunStop and wait for DSTS.DEVCTRLHLT to reflect the
 * change. Returns 0 on success, -ETIMEDOUT if the controller does not
 * halt/run in time. No-op while runtime suspended.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32 reg;
	u32 timeout = 2000;

	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		__dwc3_gadget_set_speed(dwc);
		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	/* poll DSTS.DEVCTRLHLT until it matches the requested run/stop state */
	do {
		usleep_range(1000, 2000);
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);

/*
 * Perform a device-initiated disconnect: finish any in-flight control
 * transfer, end active transfers, stop the gadget and clear RunStop.
 * Takes and releases dwc->lock internally (dropped while waiting for the
 * SETUP phase). Returns the dwc3_gadget_run_stop() result.
 */
static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->connected = false;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		int ret;

		if (dwc->delayed_status)
			dwc3_ep0_send_delayed_status(dwc);

		reinit_completion(&dwc->ep0_in_setup);

		spin_unlock_irqrestore(&dwc->lock, flags);
		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		spin_lock_irqsave(&dwc->lock, flags);
		if (ret == 0)
			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
	}

	/*
	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.8 Table 4-7, it states that for a device-initiated
	 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
	 * command for any active transfers" before clearing the RunStop
	 * bit.
	 */
	dwc3_stop_active_transfers(dwc);
	__dwc3_gadget_stop(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	/*
	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
	 * driver needs to acknowledge them before the controller can halt.
	 * Simply let the interrupt handler acknowledges and handle the
	 * remaining event generated by the controller while polling for
	 * DSTS.DEVCTLHLT.
	 */
	return dwc3_gadget_run_stop(dwc, false, false);
}

/*
 * usb_gadget_ops .pullup: connect or disconnect the data pull-up.
 * Coordinates with runtime PM so the run/stop sequencing is not
 * duplicated when the device resumes from runtime suspend.
 */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	int ret;

	is_on = !!is_on;

	dwc->softconnect = is_on;

	/*
	 * Avoid issuing a runtime resume if the device is already in the
	 * suspended state during gadget disconnect. DWC3 gadget was already
	 * halted/stopped during runtime suspend.
	 */
	if (!is_on) {
		pm_runtime_barrier(dwc->dev);
		if (pm_runtime_suspended(dwc->dev))
			return 0;
	}

	/*
	 * Check the return value for successful resume, or error. For a
	 * successful resume, the DWC3 runtime PM resume routine will handle
	 * the run stop sequence, so avoid duplicate operations here.
	 */
	ret = pm_runtime_get_sync(dwc->dev);
	if (!ret || ret < 0) {
		pm_runtime_put(dwc->dev);
		return 0;
	}

	/* already in the requested state: nothing to do */
	if (dwc->pullups_connected == is_on) {
		pm_runtime_put(dwc->dev);
		return 0;
	}

	synchronize_irq(dwc->irq_gadget);

	if (!is_on) {
		ret = dwc3_gadget_soft_disconnect(dwc);
	} else {
		/*
		 * In the Synopsys DWC_usb31 1.90a programming guide section
		 * 4.1.9, it specifies that for a reconnect after a
		 * device-initiated disconnect requires a core soft reset
		 * (DCTL.CSftRst) before enabling the run/stop bit.
		 */
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);
		__dwc3_gadget_start(dwc);
		ret = dwc3_gadget_run_stop(dwc, true, false);
	}

	pm_runtime_put(dwc->dev);

	return ret;
}

/* Enable device-level event interrupts in DEVTEN */
static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32 reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	/* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
	if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
		reg |= DWC3_DEVTEN_U3L2L1SUSPEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

/* Disable all device-level event interrupts */
static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/**
 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
 * @dwc: pointer to our context structure
 *
 * The following looks like complex but it's actually very simple. In order to
 * calculate the number of packets we can burst at once on OUT transfers, we're
 * gonna use RxFIFO size.
 *
 * To calculate RxFIFO size we need two numbers:
 * MDWIDTH = size, in bits, of the internal memory bus
 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
 *
 * Given these two numbers, the formula is simple:
 *
 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
 *
 * 24 bytes is for 3x SETUP packets
 * 16 bytes is a clock domain crossing tolerance
 *
 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
 */
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
	u32 ram2_depth;
	u32 mdwidth;
	u32 nump;
	u32 reg;

	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = dwc3_mdwidth(dwc);

	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
	nump = min_t(u32, nump, 16);

	/* update NumP */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_NUMP_MASK;
	reg |= nump << DWC3_DCFG_NUMP_SHIFT;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

/*
 * Bring the controller to a state where it can receive SETUP packets:
 * program IMOD/NUMP/stream settings and enable both physical endpoints
 * of ep0, then unmask device interrupts. Returns 0 or the endpoint
 * enable error (with ep0 partially torn down on failure).
 */
static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret = 0;
	u32 reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}

	/*
	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
	 * field instead of letting dwc3 itself calculate that automatically.
	 *
	 * This way, we maximize the chances that we'll be able to get several
	 * bursts of data without going through any sort of endpoint throttling.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
	if (DWC3_IP_IS(DWC3))
		reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
	else
		reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;

	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);

	dwc3_gadget_setup_nump(dwc);

	/*
	 * Currently the controller handles single stream only. So, Ignore
	 * Packet Pending bit for stream selection and don't search for another
	 * stream if the host sends Data Packet with PP=0 (for OUT direction) or
	 * ACK with NumP=0 and PP=0 (for IN direction). This slightly improves
	 * the stream performance.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg |= DWC3_DCFG_IGNSTRMPP;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/* Enable MST by default if the device is capable of MST */
	if (DWC3_MST_CAPABLE(&dwc->hwparams)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG1);
		reg &= ~DWC3_DCFG1_DIS_MST_ENH;
		dwc3_writel(dwc->regs, DWC3_DCFG1, reg);
	}

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	dep->flags = 0;
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	dep->flags = 0;
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc->ep0_bounced = false;
	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
	dwc->delayed_status = false;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}

/* usb_gadget_ops .udc_start: request the IRQ and register the driver */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct
		usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;
	int irq;

	irq = dwc->irq_gadget;
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc->ev_buf);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		return ret;
	}

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_driver = driver;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

/* Mask device interrupts and disable both physical endpoints of ep0 */
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);
}

/* usb_gadget_ops .udc_stop: unregister the driver and free the IRQ */
static int dwc3_gadget_stop(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_driver = NULL;
	dwc->max_cfg_eps = 0;
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(dwc->irq_gadget, dwc->ev_buf);

	return 0;
}

/*
 * usb_gadget_ops .get_config_params: report BESL and U1/U2 exit
 * latencies, honouring the relevant quirk flags.
 */
static void dwc3_gadget_config_params(struct usb_gadget *g,
				      struct usb_dcd_config_params *params)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
	params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;

	/* Recommended BESL */
	if (!dwc->dis_enblslpm_quirk) {
		/*
		 * If the recommended BESL baseline is 0 or if the BESL deep is
		 * less than 2, Microsoft's Windows 10 host usb stack will issue
		 * a usb reset immediately after it receives the extended BOS
		 * descriptor and the enumeration will fail. To maintain
		 * compatibility with the Windows' usb stack, let's set the
		 * recommended BESL baseline to 1 and clamp the BESL deep to be
		 * within 2 to 15.
		 */
		params->besl_baseline = 1;
		if (dwc->is_utmi_l1_suspend)
			params->besl_deep =
				clamp_t(u8, dwc->hird_threshold, 2, 15);
	}

	/* U1 Device exit Latency */
	if (dwc->dis_u1_entry_quirk)
		params->bU1devExitLat = 0;
	else
		params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;

	/* U2 Device exit Latency */
	if (dwc->dis_u2_entry_quirk)
		params->bU2DevExitLat = 0;
	else
		params->bU2DevExitLat =
			cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
}

/* usb_gadget_ops .udc_set_speed: record the requested maximum speed */
static void dwc3_gadget_set_speed(struct usb_gadget *g,
				  enum usb_device_speed speed)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_max_speed = speed;
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/* usb_gadget_ops .udc_set_ssp_rate: record an SSP gen/lane rate request */
static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
				     enum usb_ssp_rate rate)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
	dwc->gadget_ssp_rate = rate;
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*
 * usb_gadget_ops .vbus_draw: forward the current budget to the USB2 PHY
 * when present, otherwise to the power-supply framework (intval in uA).
 */
static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	union power_supply_propval val = {0};
	int ret;

	if (dwc->usb2_phy)
		return usb_phy_set_power(dwc->usb2_phy, mA);

	if (!dwc->usb_psy)
		return -EOPNOTSUPP;

	val.intval = 1000 * mA;
	ret = power_supply_set_property(dwc->usb_psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);

	return ret;
}

/**
 * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
 * @g: pointer to the USB gadget
 *
 * Used to record the maximum number of endpoints being used in a USB composite
 * device.
 * (across all configurations) This is to be used in the calculation
 * of the TXFIFO sizes when resizing internal memory for individual endpoints.
 * It will help ensure that the resizing logic reserves enough space for at
 * least one max packet.
 */
static int dwc3_gadget_check_config(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct usb_ep *ep;
	int fifo_size = 0;
	int ram1_depth;
	int ep_num = 0;

	if (!dwc->do_fifo_resize)
		return 0;

	list_for_each_entry(ep, &g->ep_list, ep_list) {
		/* Only interested in the IN endpoints */
		if (ep->claimed && (ep->address & USB_DIR_IN))
			ep_num++;
	}

	if (ep_num <= dwc->max_cfg_eps)
		return 0;

	/* Update the max number of eps in the composition */
	dwc->max_cfg_eps = ep_num;

	fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
	/* Based on the equation, increment by one for every ep */
	fifo_size += dwc->max_cfg_eps;

	/* Check if we can fit a single fifo per endpoint */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	if (fifo_size > ram1_depth)
		return -ENOMEM;

	return 0;
}

/* usb_gadget_ops .udc_async_callbacks: gate driver callbacks on/off */
static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->async_callbacks = enable;
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/* Gadget-level operations exposed to the UDC core */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
	.udc_set_speed		= dwc3_gadget_set_speed,
	.udc_set_ssp_rate	= dwc3_gadget_set_ssp_rate,
	.get_config_params	= dwc3_gadget_config_params,
	.vbus_draw		= dwc3_gadget_vbus_draw,
	.check_config		= dwc3_gadget_check_config,
	.udc_async_callbacks	= dwc3_gadget_async_callbacks,
};

/* -------------------------------------------------------------------------- */

/* Initialize one of the two physical endpoints that make up ep0 */
static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
	dep->endpoint.maxburst = 1;
	dep->endpoint.ops = &dwc3_gadget_ep0_ops;
	if (!dep->direction)
		dwc->gadget->ep0 = &dep->endpoint;

	dep->endpoint.caps.type_control = true;

	return 0;
}

/*
 * Initialize an IN endpoint: derive its max packet limit from the
 * TxFIFO depth and allocate its TRB pool.
 */
static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 mdwidth;
	int size;
	int maxpacket;

	mdwidth = dwc3_mdwidth(dwc);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth /= 8;

	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
	if (DWC3_IP_IS(DWC3))
		size = DWC3_GTXFIFOSIZ_TXFDEP(size);
	else
		size = DWC31_GTXFIFOSIZ_TXFDEP(size);

	/*
	 * maxpacket size is determined as part of the following, after assuming
	 * a mult value of one maxpacket:
	 * DWC3 revision 280A and prior:
	 * fifo_size = mult * (max_packet / mdwidth) + 1;
	 * maxpacket = mdwidth * (fifo_size - 1);
	 *
	 * DWC3 revision 290A and onwards:
	 * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
	 * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth;
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 290A))
		maxpacket = mdwidth * (size - 1);
	else
		maxpacket = mdwidth * ((size - 1) - 1) - mdwidth;

	/* Functionally, space for one max packet is sufficient */
	size = min_t(int, maxpacket, 1024);
	usb_ep_set_maxpacket_limit(&dep->endpoint, size);

	dep->endpoint.max_streams = 16;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget->ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

/*
 * Initialize an OUT endpoint: derive its max packet limit from the
 * shared RxFIFO depth and allocate its TRB pool.
 */
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 mdwidth;
	int size;

	mdwidth = dwc3_mdwidth(dwc);

	/* MDWIDTH is represented in bits, convert to bytes */
	mdwidth /= 8;

	/* All OUT endpoints share a single RxFIFO space */
	size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
	if (DWC3_IP_IS(DWC3))
		size = DWC3_GRXFIFOSIZ_RXFDEP(size);
	else
		size = DWC31_GRXFIFOSIZ_RXFDEP(size);

	/* FIFO depth is in MDWDITH bytes */
	size *= mdwidth;

	/*
	 * To meet performance requirement, a minimum recommended RxFIFO size
	 * is defined as follow:
	 * RxFIFO size >= (3 x MaxPacketSize) +
	 * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
	 *
	 * Then calculate the max packet limit as below.
	 */
	size -= (3 * 8) + 16;
	if (size < 0)
		size = 0;
	else
		size /= 3;

	usb_ep_set_maxpacket_limit(&dep->endpoint, size);
	dep->endpoint.max_streams = 16;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget->ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

/*
 * Allocate and initialize one physical endpoint. Odd physical endpoint
 * numbers are IN, even are OUT; physical endpoints 0/1 form USB ep0.
 */
static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
	struct dwc3_ep *dep;
	bool direction = epnum & 1;
	int ret;
	u8 num = epnum >> 1;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return -ENOMEM;

	dep->dwc = dwc;
	dep->number = epnum;
	dep->direction = direction;
	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
	dwc->eps[epnum] = dep;
	dep->combo_num = 0;
	dep->start_cmd_status = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
			direction ? "in" : "out");

	dep->endpoint.name = dep->name;

	/* ep0's descriptor is the static control descriptor */
	if (!(dep->number > 1)) {
		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
		dep->endpoint.comp_desc = NULL;
	}

	if (num == 0)
		ret = dwc3_gadget_init_control_endpoint(dep);
	else if (direction)
		ret = dwc3_gadget_init_in_endpoint(dep);
	else
		ret = dwc3_gadget_init_out_endpoint(dep);

	if (ret)
		return ret;

	dep->endpoint.caps.dir_in = direction;
	dep->endpoint.caps.dir_out = !direction;

	INIT_LIST_HEAD(&dep->pending_list);
	INIT_LIST_HEAD(&dep->started_list);
	INIT_LIST_HEAD(&dep->cancelled_list);

	dwc3_debugfs_create_endpoint_dir(dep);

	return 0;
}

/* Create all physical endpoints up to @total */
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget->ep_list);

	for (epnum = 0; epnum < total; epnum++) {
		int ret;

		ret = dwc3_gadget_init_endpoint(dwc, epnum);
		if (ret)
			return ret;
	}

	return 0;
}

/* Tear down every allocated endpoint: TRB pools, list links, debugfs */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them the endpoints list. Due to that, we
		 * shouldn't do these two operations otherwise we would end up
		 * with all sorts of bugs when removing dwc3.ko.
 */
	if (epnum != 0 && epnum != 1) {
		dwc3_free_trb_pool(dep);
		list_del(&dep->endpoint.ep_list);
	}

	debugfs_remove_recursive(debugfs_lookup(dep->name,
			debugfs_lookup(dev_name(dep->dwc->dev),
			usb_debug_root)));
	kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

/*
 * Reclaim one completed TRB of @req and account its transferred bytes.
 *
 * Returns 1 when the caller must stop reclaiming TRBs for this request
 * (HWO still owned by HW, short packet on the last TRB of a chain, IOC/LST
 * reached, or an MPS-alignment bounce TRB), 0 to keep going.
 */
static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;

	/*
	 * If we're in the middle of series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * where CHN bit is zero. DWC3 will also avoid clearing HWO
	 * bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * We use bounce buffer for requests that needs extra TRB or OUT ZLP. If
	 * this TRB points to the bounce buffer address, it's a MPS alignment
	 * TRB. Don't add it to req->remaining calculation.
	 */
	if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
	    trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		return 1;
	}

	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	if (event->status & DEPEVT_STATUS_SHORT && !chain)
		return 1;

	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
	    (trb->ctrl & DWC3_TRB_CTRL_LST))
		return 1;

	return 0;
}

/* Reclaim completed TRBs for each queued scatter-gather entry of @req. */
static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	unsigned int num_queued = req->num_queued_sgs;
	unsigned int i;
	int ret = 0;

	for_each_sg(sg, s, num_queued, i) {
		trb = &dep->trb_pool[dep->trb_dequeue];

		req->sg = sg_next(s);
		req->num_queued_sgs--;

		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
				trb, event, status, true);
		if (ret)
			break;
	}

	return ret;
}

/* Reclaim the single TRB of a linear (non-SG) request. */
static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];

	return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
			event, status, false);
}

/* A request is done once it has no pending and no queued SG entries left. */
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
	return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
}

/*
 * Reclaim the TRBs of one completed request, compute the actual transfer
 * length, and give the request back to the gadget driver when fully done.
 * Returns the (non-zero) stop indication from TRB reclaim, if any.
 */
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event,
		struct dwc3_request *req, int status)
{
	int request_status;
	int ret;

	if (req->request.num_mapped_sgs)
		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
				status);
	else
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);

	req->request.actual = req->request.length - req->remaining;

	if (!dwc3_gadget_ep_request_completed(req))
		goto out;

	if (req->needs_extra_trb) {
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
		req->needs_extra_trb = false;
	}

	/*
	 * The event status only reflects the status of the TRB with IOC set.
	 * For the requests that don't set interrupt on completion, the driver
	 * needs to check and return the status of the completed TRBs associated
	 * with the request. Use the status of the last TRB of the request.
	 */
	if (req->request.no_interrupt) {
		struct dwc3_trb *trb;

		trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
		switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
		case DWC3_TRBSTS_MISSED_ISOC:
			/* Isoc endpoint only */
			request_status = -EXDEV;
			break;
		case DWC3_TRB_STS_XFER_IN_PROG:
			/* Applicable when End Transfer with ForceRM=0 */
		case DWC3_TRBSTS_SETUP_PENDING:
			/* Control endpoint only */
		case DWC3_TRBSTS_OK:
		default:
			request_status = 0;
			break;
		}
	} else {
		request_status = status;
	}

	dwc3_gadget_giveback(dep, req, request_status);

out:
	return ret;
}

/* Walk the started list and give back every request that has completed. */
static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;

	while (!list_empty(&dep->started_list)) {
		int ret;

		req = next_request(&dep->started_list);
		ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
				req, status);
		if (ret)
			break;
		/*
		 * The endpoint is disabled, let the dwc3_remove_requests()
		 * handle the cleanup.
		 */
		if (!dep->endpoint.desc)
			break;
	}
}

/*
 * Decide whether another transfer should be kicked on this endpoint:
 * true when connected and there is either a pending request or an
 * incomplete request at the head of the started list.
 */
static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
{
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;

	if (!dep->endpoint.desc || !dwc->pullups_connected ||
	    !dwc->connected)
		return false;

	if (!list_empty(&dep->pending_list))
		return true;

	/*
	 * We only need to check the first entry of the started list. We can
	 * assume the completed requests are removed from the started list.
	 */
	req = next_request(&dep->started_list);
	if (!req)
		return false;

	return !dwc3_gadget_ep_request_completed(req);
}

/* Latch the (micro)frame number reported in the endpoint event. */
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dep->frame_number = event->parameters;
}

/*
 * Complete finished requests and restart or stop the endpoint as needed.
 * Returns true when no TRB was (re)started on this endpoint.
 */
static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3 *dwc = dep->dwc;
	bool no_started_trb = true;

	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);

	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		goto out;

	if (!dep->endpoint.desc)
		return no_started_trb;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
		list_empty(&dep->started_list) &&
		(list_empty(&dep->pending_list) || status == -EXDEV))
		dwc3_stop_active_transfer(dep, true, true);
	else if (dwc3_gadget_ep_should_continue(dep))
		if (__dwc3_gadget_kick_transfer(dep) == 0)
			no_started_trb = false;

out:
	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		u32 reg;
		int i;

		/* Only restore U1/U2 once no enabled EP has started TRBs. */
		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->started_list))
				return no_started_trb;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}

	return no_started_trb;
}

/* Handle the XferInProgress endpoint event. */
static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	int status = 0;

	if (!dep->endpoint.desc)
		return;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dwc3_gadget_endpoint_frame_from_event(dep, event);

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	if (event->status & DEPEVT_STATUS_MISSED_ISOC)
		status = -EXDEV;

	dwc3_gadget_endpoint_trbs_complete(dep, event, status);
}

/* Handle the XferComplete endpoint event. */
static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	int status = 0;

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
		dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
}

/* Handle the XferNotReady endpoint event (isoc start trigger). */
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dwc3_gadget_endpoint_frame_from_event(dep, event);

	/*
	 * The XferNotReady event is generated only once before the endpoint
	 * starts. It will be generated again when END_TRANSFER command is
	 * issued. For some controller versions, the XferNotReady event may be
	 * generated while the END_TRANSFER command is still in process.
Ignore
	 * it and wait for the next XferNotReady event after the command is
	 * completed.
	 */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		return;

	(void) __dwc3_gadget_start_isoc(dep);
}

/*
 * Handle EPCmdCmplt: only End Transfer completions need work here — clear
 * the transfer state, give back cancelled requests, finish any deferred
 * ClearStall, and kick a transfer that was delayed behind the command.
 */
static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);

	if (cmd != DWC3_DEPCMD_ENDTRANSFER)
		return;

	/*
	 * The END_TRANSFER command will cause the controller to generate a
	 * NoStream Event, and it's not due to the host DP NoStream rejection.
	 * Ignore the next NoStream event.
	 */
	if (dep->stream_capable)
		dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;

	dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dwc3_gadget_ep_cleanup_cancelled_requests(dep);

	if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
		struct dwc3 *dwc = dep->dwc;

		dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
		if (dwc3_send_clear_stall_ep_cmd(dep)) {
			struct usb_ep *ep0 = &dwc->eps[0]->endpoint;

			dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
			if (dwc->delayed_status)
				__dwc3_gadget_ep0_set_halt(ep0, 1);
			return;
		}

		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
		if (dwc->clear_stall_protocol == dep->number)
			dwc3_ep0_send_delayed_status(dwc);
	}

	if ((dep->flags & DWC3_EP_DELAY_START) &&
	    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
		__dwc3_gadget_kick_transfer(dep);

	dep->flags &= ~DWC3_EP_DELAY_START;
}

/* Handle stream found / prime / NoStream events on a stream-capable EP. */
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;

	if (event->status == DEPEVT_STREAMEVT_FOUND) {
		dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		goto out;
	}

	/* Note: NoStream rejection event param value is 0 and not 0xFFFF */
	switch (event->parameters) {
	case DEPEVT_STREAM_PRIME:
		/*
		 * If the host can properly transition the endpoint state from
		 * idle to prime after a NoStream rejection, there's no need to
		 * force restarting the endpoint to reinitiate the stream. To
		 * simplify the check, assume the host follows the USB spec if
		 * it primed the endpoint more than once.
		 */
		if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
			if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
				dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
			else
				dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		}

		break;
	case DEPEVT_STREAM_NOSTREAM:
		if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
		    !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
		    (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
		     !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
			break;

		/*
		 * If the host rejects a stream due to no active stream, by the
		 * USB and xHCI spec, the endpoint will be put back to idle
		 * state. When the host is ready (buffer added/updated), it will
		 * prime the endpoint to inform the usb device controller. This
		 * triggers the device controller to issue ERDY to restart the
		 * stream. However, some hosts don't follow this and keep the
		 * endpoint in the idle state. No prime will come despite host
		 * streams are updated, and the device controller will not be
		 * triggered to generate ERDY to move the next stream data. To
		 * workaround this and maintain compatibility with various
		 * hosts, force to reinitiate the stream until the host is ready
		 * instead of waiting for the host to prime the endpoint.
		 */
		if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
			unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;

			dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
		} else {
			dep->flags |= DWC3_EP_DELAY_START;
			dwc3_stop_active_transfer(dep, true, true);
			return;
		}
		break;
	}

out:
	dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
}

/* Top-level dispatcher for endpoint events from the event buffer. */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return;

		/* Handle only EPCMDCMPLT when EP disabled */
		if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
			!(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
			return;
	}

	/* Physical endpoints 0/1 form ep0 and have their own handler. */
	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_gadget_endpoint_transfer_in_progress(dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_gadget_endpoint_transfer_not_ready(dep, event);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_gadget_endpoint_command_complete(dep, event);
		break;
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_gadget_endpoint_transfer_complete(dep, event);
		break;
	case DWC3_DEPEVT_STREAMEVT:
		dwc3_gadget_endpoint_stream_event(dep, event);
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		break;
	}
}

/* Invoke the gadget driver's ->disconnect callback with the lock dropped. */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/* Invoke the gadget driver's ->suspend callback with the lock dropped. */
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/* Invoke the gadget driver's ->resume callback with the lock dropped. */
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/* Notify the UDC core of a bus reset, with the controller lock dropped. */
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}

/*
 * Issue (or defer) an End Transfer command for @dep.
 * @force: passed through to __dwc3_stop_active_transfer().
 * @interrupt: passed through to __dwc3_stop_active_transfer().
 */
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
	bool interrupt)
{
	struct dwc3 *dwc = dep->dwc;

	/*
	 * Only issue End Transfer command to the control endpoint of a started
	 * Data Phase. Typically we should only do so in error cases such as
	 * invalid/unexpected direction as described in the control transfer
	 * flow of the programming guide.
	 */
	if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
		return;

	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
	    (dep->flags & DWC3_EP_DELAY_STOP) ||
	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
		return;

	/*
	 * If a Setup packet is received but yet to DMA out, the controller will
	 * not process the End Transfer command of any endpoint. Polling of its
	 * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
	 * timeout. Delay issuing the End Transfer command until the Setup TRB is
	 * prepared.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		dep->flags |= DWC3_EP_DELAY_STOP;
		return;
	}

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is issuing EndTransfer with
	 * CMDIOC bit set and delay kicking transfer until the
	 * EndTransfer command had completed.
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing a EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function.
	 *
	 * This mode is NOT available on the DWC_usb31 IP.
	 */

	__dwc3_stop_active_transfer(dep, force, interrupt);
}

/* Send ClearStall to every non-ep0 endpoint still flagged as stalled. */
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

/*
 * Handle the Disconnect device event: drop U1/U2 initiation, notify the
 * gadget driver, and unwind any in-flight ep0 control transfer.
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	dwc->connected = false;

	dwc3_disconnect_gadget(dwc);

	dwc->gadget->speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);

	/* Abort an ep0 transfer caught mid data/status phase. */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		unsigned int dir;

		dir = !!dwc->ep0_expect_in;
		if (dwc->ep0state == EP0_DATA_PHASE)
			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
		else
			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
		dwc3_ep0_stall_and_restart(dwc);
	}
}

/* Handle the USB Reset device event. */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	/*
	 * Ideally, dwc3_reset_gadget() would trigger the function
	 * drivers to stop any active transfers through ep disable.
	 * However, for functions which defer ep disable, such as mass
	 * storage, we will need to rely on the call to stop active
	 * transfers here, and avoid allowing of request queuing.
	 */
	dwc->connected = false;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	/*
	 * From SNPS databook section 8.1.2, the EP0 should be in setup
	 * phase. So ensure that EP0 is in setup phase by issuing a stall
	 * and restart if EP0 is not in setup phase.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		unsigned int dir;

		dir = !!dwc->ep0_expect_in;
		if (dwc->ep0state == EP0_DATA_PHASE)
			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
		else
			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);

		dwc->eps[0]->trb_enqueue = 0;
		dwc->eps[1]->trb_enqueue = 0;

		dwc3_ep0_stall_and_restart(dwc);
	}

	/*
	 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
	 * needs to ensure that it sends "a DEPENDXFER command for any active
	 * transfers."
	 */
	dwc3_stop_active_transfers(dwc);
	dwc->connected = true;

	/* Leave any USB2 test mode and clear stuck stalls. */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

/*
 * Handle the Connect Done device event: read the negotiated speed/lanes,
 * program ep0 max packet size, configure USB2 LPM, and re-enable ep0.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 lanes = 1;
	u8 speed;

	if (!dwc->softconnect)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	if (DWC3_IP_IS(DWC32))
		lanes = DWC3_DSTS_CONNLANES(reg) + 1;

	dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value. If any platform
	 * wants to set this to a different value, we need to add a
	 * setting and update GCTL.RAMCLKSEL here.
	 */

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER_PLUS;

		if (lanes > 1)
			dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
		else
			dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (DWC3_VER_IS_PRIOR(DWC3, 190A))
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER;

		if (lanes > 1) {
			dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
			dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
		}
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_FULL;
		break;
	}

	dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;

	/* Enable USB2 LPM Capability */

	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
	    !dwc->usb2_gadget_lpm_disable &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
					    (dwc->is_utmi_l1_suspend << 4));

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);

		dwc3_gadget_dctl_write_safe(dwc, reg);
	} else {
		if (dwc->usb2_gadget_lpm_disable) {
			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
			reg &= ~DWC3_DCFG_LPM_CAP;
			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_gadget_dctl_write_safe(dwc, reg);
	}

	/* Re-enable both ep0 halves with the new max packet size. */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

/* Handle the Wakeup device event by resuming the gadget driver. */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/* Handle a Link State Change device event. */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
	    (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
		    (next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* Save the bits so the 2nd half can restore them. */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_gadget_dctl_write_safe(dwc, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

/* Handle the Suspend (U3 entry) device event. */
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
		dwc3_suspend_gadget(dwc);

	dwc->link_state = next;
}

/* Handle the Hibernation Request device event (entry is not implemented). */
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * have a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

/* Dispatch a device-specific (non-endpoint) event. */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_SUSPEND:
		/* It changed to be suspend event for version 2.30a and above */
		if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
			/*
			 * Ignore suspend event until the gadget enters into
			 * USB_STATE_CONFIGURED state.
			 */
			if (dwc->gadget->state >= USB_STATE_CONFIGURED)
				dwc3_gadget_suspend_interrupt(dwc,
						event->event_info);
		}
		break;
	case DWC3_DEVICE_EVENT_SOF:
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
	case DWC3_DEVICE_EVENT_CMD_CMPL:
	case DWC3_DEVICE_EVENT_OVERFLOW:
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

/* Route one raw event-buffer entry to the endpoint or device handler. */
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw, dwc);

	if (!event->type.is_devspec)
		dwc3_endpoint_interrupt(dwc, &event->depevt);
	else if (event->type.type == DWC3_EVENT_TYPE_DEV)
		dwc3_gadget_interrupt(dwc, &event->devt);
	else
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
}

/*
 * Drain the cached event buffer (filled by dwc3_check_event_buf()) in the
 * threaded-IRQ context, then unmask the interrupt and re-arm moderation.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->cache + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the a
		 * boundary so I worry about that once we try to handle
		 * that.
		 */
		evt->lpos = (evt->lpos + 4) % evt->length;
		left -= 4;
	}

	evt->count = 0;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));

	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}

	/* Keep the clearing of DWC3_EVENT_PENDING at the end */
	evt->flags &= ~DWC3_EVENT_PENDING;

	return ret;
}

/* Threaded IRQ handler: process events under the controller lock. */
static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;
	struct dwc3 *dwc = evt->dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	local_bh_disable();
	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_process_event_buf(evt);
	spin_unlock_irqrestore(&dwc->lock, flags);
	local_bh_enable();

	return ret;
}

/*
 * Hard IRQ half: snapshot pending events into evt->cache, mask the
 * interrupt, and wake the thread. Defers to runtime-PM resume if suspended.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 amount;
	u32 count;

	if (pm_runtime_suspended(dwc->dev)) {
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		dwc->pending_events = true;
		return IRQ_HANDLED;
	}

	/*
	 * With PCIe legacy interrupt, test shows that top-half irq handler can
	 * be called again after HW interrupt deassertion. Check if bottom-half
	 * irq event handler completes before caching new event to prevent
	 * losing events.
	 */
	if (evt->flags & DWC3_EVENT_PENDING)
		return IRQ_HANDLED;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length));

	/* Copy in two chunks when the ring wraps. */
	amount = min(count, evt->length - evt->lpos);
	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);

	if (amount < count)
		memcpy(evt->cache, evt->buf, count - amount);

	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);

	return IRQ_WAKE_THREAD;
}

/* Top-level gadget IRQ entry point. */
static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;

	return dwc3_check_event_buf(evt);
}

/* Look up the gadget IRQ, trying the named resources before index 0. */
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
	int irq;

	irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
	if (irq > 0)
		goto out;

	if (irq == -EPROBE_DEFER)
		goto out;

	irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
	if (irq > 0)
		goto out;

	if (irq == -EPROBE_DEFER)
		goto out;

	irq = platform_get_irq(dwc3_pdev, 0);
	if (irq > 0)
		goto out;

	if (!irq)
		irq = -EINVAL;

out:
	return irq;
}

/* Release callback for the gadget device: frees the embedded usb_gadget. */
static void dwc_gadget_release(struct device *dev)
{
	struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);

	kfree(gadget);
}

/**
 * dwc3_gadget_init - initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;
	int irq;
	struct device *dev;

	irq = dwc3_gadget_get_irq(dwc);
	if (irq < 0) {
		ret = irq;
		goto err0;
	}

	dwc->irq_gadget = irq;

	/* Two TRBs for endpoint 0 (see the "* 2" sizing below). */
	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
					  sizeof(*dwc->ep0_trb) * 2,
					  &dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err1;
	}

	dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
					 &dwc->bounce_addr, GFP_KERNEL);
	if (!dwc->bounce) {
		ret = -ENOMEM;
		goto err2;
	}

	init_completion(&dwc->ep0_in_setup);
	dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
	if (!dwc->gadget) {
		ret = -ENOMEM;
		goto err3;
	}


	/* From here on dwc->gadget is refcounted; dwc_gadget_release() frees it. */
	usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
	dev = &dwc->gadget->dev;
	dev->platform_data = dwc;
	dwc->gadget->ops = &dwc3_gadget_ops;
	dwc->gadget->speed = USB_SPEED_UNKNOWN;
	dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
	dwc->gadget->sg_supported = true;
	dwc->gadget->name = "dwc3-gadget";
	dwc->gadget->lpm_capable = !dwc->usb2_gadget_lpm_disable;

	/*
	 * FIXME We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which tells us we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
	 * to happen so we avoid sending SuperSpeed Capability descriptor
	 * together with our BOS descriptor as that could confuse host into
	 * thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
	    !dwc->dis_metastability_quirk)
		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
			 dwc->revision);

	dwc->gadget->max_speed = dwc->maximum_speed;
	dwc->gadget->max_ssp_rate = dwc->max_ssp_rate;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
	if (ret)
		goto err4;

	ret = usb_add_gadget(dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to add gadget\n");
		goto err5;
	}

	if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
		dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
	else
		dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);

	return 0;

	/* Error unwinding: release everything acquired above, in reverse order. */
err5:
	dwc3_gadget_free_endpoints(dwc);
err4:
	/* usb_put_gadget() drops the reference; dwc_gadget_release() kfrees. */
	usb_put_gadget(dwc->gadget);
	dwc->gadget = NULL;
err3:
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);

err2:
	kfree(dwc->setup_buf);

err1:
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

/* Tear down everything set up by dwc3_gadget_init(). */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	/* Nothing to do if gadget init never ran or failed early. */
	if (!dwc->gadget)
		return;

	usb_del_gadget(dwc->gadget);
	dwc3_gadget_free_endpoints(dwc);
	/* Drops the gadget reference; dwc_gadget_release() frees the struct. */
	usb_put_gadget(dwc->gadget);
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
	kfree(dwc->setup_buf);
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
}

/*
 * Suspend hook: halt the controller and detach from the bound gadget
 * driver. No-op (returns 0) when no gadget driver is bound.
 */
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	unsigned long flags;

	if (!dwc->gadget_driver)
		return 0;

	/* Stop the controller before notifying the gadget driver. */
	dwc3_gadget_run_stop(dwc, false, false);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_disconnect_gadget(dwc);
	__dwc3_gadget_stop(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

/*
 * Resume hook: restart the controller if a gadget driver is bound and the
 * pullup is meant to be on (softconnect). Returns 0 on success or a
 * negative errno from the start/run-stop path, undoing the start on failure.
 */
int dwc3_gadget_resume(struct dwc3 *dwc)
{
	int ret;

	if (!dwc->gadget_driver || !dwc->softconnect)
		return 0;

	ret = __dwc3_gadget_start(dwc);
	if (ret < 0)
		goto err0;

	ret = dwc3_gadget_run_stop(dwc, true, false);
	if (ret < 0)
		goto err1;

	return 0;

err1:
	__dwc3_gadget_stop(dwc);

err0:
	return ret;
}

/*
 * Replay an interrupt that arrived while the controller was runtime
 * suspended (see the pm_runtime_suspended() path in dwc3_check_event_buf())
 * and re-enable the IRQ line that was disabled there.
 */
void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
	if (dwc->pending_events) {
		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
		dwc->pending_events = false;
		enable_irq(dwc->irq_gadget);
	}
}