1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 4 * 5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 6 * 7 * Authors: Felipe Balbi <balbi@ti.com>, 8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 9 */ 10 11 #include <linux/kernel.h> 12 #include <linux/delay.h> 13 #include <linux/slab.h> 14 #include <linux/spinlock.h> 15 #include <linux/platform_device.h> 16 #include <linux/pm_runtime.h> 17 #include <linux/interrupt.h> 18 #include <linux/io.h> 19 #include <linux/list.h> 20 #include <linux/dma-mapping.h> 21 22 #include <linux/usb/ch9.h> 23 #include <linux/usb/gadget.h> 24 25 #include "debug.h" 26 #include "core.h" 27 #include "gadget.h" 28 #include "io.h" 29 30 #define DWC3_ALIGN_FRAME(d, n) (((d)->frame_number + ((d)->interval * (n))) \ 31 & ~((d)->interval - 1)) 32 33 /** 34 * dwc3_gadget_set_test_mode - enables usb2 test modes 35 * @dwc: pointer to our context structure 36 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 37 * 38 * Caller should take care of locking. This function will return 0 on 39 * success or -EINVAL if wrong Test Selector is passed. 40 */ 41 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 42 { 43 u32 reg; 44 45 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 46 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 47 48 switch (mode) { 49 case TEST_J: 50 case TEST_K: 51 case TEST_SE0_NAK: 52 case TEST_PACKET: 53 case TEST_FORCE_EN: 54 reg |= mode << 1; 55 break; 56 default: 57 return -EINVAL; 58 } 59 60 dwc3_gadget_dctl_write_safe(dwc, reg); 61 62 return 0; 63 } 64 65 /** 66 * dwc3_gadget_get_link_state - gets current state of usb link 67 * @dwc: pointer to our context structure 68 * 69 * Caller should take care of locking. This function will 70 * return the link state on success (>= 0) or -ETIMEDOUT. 71 */ 72 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 73 { 74 u32 reg; 75 76 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 77 78 return DWC3_DSTS_USBLNKST(reg); 79 } 80 81 /** 82 * dwc3_gadget_set_link_state - sets usb link to a particular state 83 * @dwc: pointer to our context structure 84 * @state: the state to put link into 85 * 86 * Caller should take care of locking. This function will 87 * return 0 on success or -ETIMEDOUT. 88 */ 89 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 90 { 91 int retries = 10000; 92 u32 reg; 93 94 /* 95 * Wait until device controller is ready. Only applies to 1.94a and 96 * later RTL. 
97 */ 98 if (dwc->revision >= DWC3_REVISION_194A) { 99 while (--retries) { 100 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 101 if (reg & DWC3_DSTS_DCNRD) 102 udelay(5); 103 else 104 break; 105 } 106 107 if (retries <= 0) 108 return -ETIMEDOUT; 109 } 110 111 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 112 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 113 114 /* set no action before sending new link state change */ 115 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 116 117 /* set requested state */ 118 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 119 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 120 121 /* 122 * The following code is racy when called from dwc3_gadget_wakeup, 123 * and is not needed, at least on newer versions 124 */ 125 if (dwc->revision >= DWC3_REVISION_194A) 126 return 0; 127 128 /* wait for a change in DSTS */ 129 retries = 10000; 130 while (--retries) { 131 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 132 133 if (DWC3_DSTS_USBLNKST(reg) == state) 134 return 0; 135 136 udelay(5); 137 } 138 139 return -ETIMEDOUT; 140 } 141 142 /** 143 * dwc3_ep_inc_trb - increment a trb index. 144 * @index: Pointer to the TRB index to increment. 145 * 146 * The index should never point to the link TRB. After incrementing, 147 * if it is point to the link TRB, wrap around to the beginning. The 148 * link TRB is always at the last TRB entry. 149 */ 150 static void dwc3_ep_inc_trb(u8 *index) 151 { 152 (*index)++; 153 if (*index == (DWC3_TRB_NUM - 1)) 154 *index = 0; 155 } 156 157 /** 158 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer 159 * @dep: The endpoint whose enqueue pointer we're incrementing 160 */ 161 static void dwc3_ep_inc_enq(struct dwc3_ep *dep) 162 { 163 dwc3_ep_inc_trb(&dep->trb_enqueue); 164 } 165 166 /** 167 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer 168 * @dep: The endpoint whose enqueue pointer we're incrementing 169 */ 170 static void dwc3_ep_inc_deq(struct dwc3_ep *dep) 171 { 172 dwc3_ep_inc_trb(&dep->trb_dequeue); 173 } 174 175 static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, 176 struct dwc3_request *req, int status) 177 { 178 struct dwc3 *dwc = dep->dwc; 179 180 list_del(&req->list); 181 req->remaining = 0; 182 req->needs_extra_trb = false; 183 184 if (req->request.status == -EINPROGRESS) 185 req->request.status = status; 186 187 if (req->trb) 188 usb_gadget_unmap_request_by_dev(dwc->sysdev, 189 &req->request, req->direction); 190 191 req->trb = NULL; 192 trace_dwc3_gadget_giveback(req); 193 194 if (dep->number > 1) 195 pm_runtime_put(dwc->dev); 196 } 197 198 /** 199 * dwc3_gadget_giveback - call struct usb_request's ->complete callback 200 * @dep: The endpoint to whom the request belongs to 201 * @req: The request we're giving back 202 * @status: completion code for the request 203 * 204 * Must be called with controller's lock held and interrupts disabled. This 205 * function will unmap @req and call its ->complete() callback to notify upper 206 * layers that it has completed. 
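 *
 * The controller's lock is released around the ->complete() call and
 * re-acquired before returning, so the completion callback itself may queue
 * new requests on the endpoint.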
207 */ 208 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 209 int status) 210 { 211 struct dwc3 *dwc = dep->dwc; 212 213 dwc3_gadget_del_and_unmap_request(dep, req, status); 214 req->status = DWC3_REQUEST_STATUS_COMPLETED; 215 216 spin_unlock(&dwc->lock); 217 usb_gadget_giveback_request(&dep->endpoint, &req->request); 218 spin_lock(&dwc->lock); 219 } 220 221 /** 222 * dwc3_send_gadget_generic_command - issue a generic command for the controller 223 * @dwc: pointer to the controller context 224 * @cmd: the command to be issued 225 * @param: command parameter 226 * 227 * Caller should take care of locking. Issue @cmd with a given @param to @dwc 228 * and wait for its completion. 229 */ 230 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 231 { 232 u32 timeout = 500; 233 int status = 0; 234 int ret = 0; 235 u32 reg; 236 237 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 238 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 239 240 do { 241 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 242 if (!(reg & DWC3_DGCMD_CMDACT)) { 243 status = DWC3_DGCMD_STATUS(reg); 244 if (status) 245 ret = -EINVAL; 246 break; 247 } 248 } while (--timeout); 249 250 if (!timeout) { 251 ret = -ETIMEDOUT; 252 status = -ETIMEDOUT; 253 } 254 255 trace_dwc3_gadget_generic_cmd(cmd, param, status); 256 257 return ret; 258 } 259 260 static int __dwc3_gadget_wakeup(struct dwc3 *dwc); 261 262 /** 263 * dwc3_send_gadget_ep_cmd - issue an endpoint command 264 * @dep: the endpoint to which the command is going to be issued 265 * @cmd: the command to be issued 266 * @params: parameters to the command 267 * 268 * Caller should handle locking. This function will issue @cmd with given 269 * @params to @dep and wait for its completion. 270 */ 271 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, 272 struct dwc3_gadget_ep_cmd_params *params) 273 { 274 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 275 struct dwc3 *dwc = dep->dwc; 276 u32 timeout = 1000; 277 u32 saved_config = 0; 278 u32 reg; 279 280 int cmd_status = 0; 281 int ret = -EINVAL; 282 283 /* 284 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or 285 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an 286 * endpoint command. 287 * 288 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY 289 * settings. Restore them after the command is completed. 
290 * 291 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 292 */ 293 if (dwc->gadget.speed <= USB_SPEED_HIGH) { 294 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 295 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 296 saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; 297 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 298 } 299 300 if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) { 301 saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM; 302 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 303 } 304 305 if (saved_config) 306 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 307 } 308 309 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 310 int needs_wakeup; 311 312 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || 313 dwc->link_state == DWC3_LINK_STATE_U2 || 314 dwc->link_state == DWC3_LINK_STATE_U3); 315 316 if (unlikely(needs_wakeup)) { 317 ret = __dwc3_gadget_wakeup(dwc); 318 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 319 ret); 320 } 321 } 322 323 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 324 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 325 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 326 327 /* 328 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're 329 * not relying on XferNotReady, we can make use of a special "No 330 * Response Update Transfer" command where we should clear both CmdAct 331 * and CmdIOC bits. 332 * 333 * With this, we don't need to wait for command completion and can 334 * straight away issue further commands to the endpoint. 335 * 336 * NOTICE: We're making an assumption that control endpoints will never 337 * make use of Update Transfer command. This is a safe assumption 338 * because we can never have more than one request at a time with 339 * Control Endpoints. If anybody changes that assumption, this chunk 340 * needs to be updated accordingly. 341 */ 342 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && 343 !usb_endpoint_xfer_isoc(desc)) 344 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); 345 else 346 cmd |= DWC3_DEPCMD_CMDACT; 347 348 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); 349 do { 350 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 351 if (!(reg & DWC3_DEPCMD_CMDACT)) { 352 cmd_status = DWC3_DEPCMD_STATUS(reg); 353 354 switch (cmd_status) { 355 case 0: 356 ret = 0; 357 break; 358 case DEPEVT_TRANSFER_NO_RESOURCE: 359 ret = -EINVAL; 360 break; 361 case DEPEVT_TRANSFER_BUS_EXPIRY: 362 /* 363 * SW issues START TRANSFER command to 364 * isochronous ep with future frame interval. If 365 * future interval time has already passed when 366 * core receives the command, it will respond 367 * with an error status of 'Bus Expiry'. 368 * 369 * Instead of always returning -EINVAL, let's 370 * give a hint to the gadget driver that this is 371 * the case by returning -EAGAIN. 
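				 * The isochronous start path in this driver
				 * (__dwc3_gadget_start_isoc) relies on this
				 * hint: on -EAGAIN it retries Start Transfer
				 * with a frame number further in the future
				 * (see DWC3_ALIGN_FRAME).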
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		dep->flags |= DWC3_EP_TRANSFER_STARTED;
		dwc3_gadget_ep_get_transfer_index(dep);
	}

	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}

static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/*
	 * As of core revision 2.60a the recommended programming model
	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
	 * command for IN endpoints. This is to prevent an issue where
	 * some (non-compliant) hosts may not send ACK TPs for pending
	 * IN transfers due to a mishandled error condition. Synopsys
	 * STAR 9000614252.
	 */
	if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
	    (dwc->gadget.speed >= USB_SPEED_SUPER))
		cmd |= DWC3_DEPCMD_CLEARPENDIN;

	memset(&params, 0, sizeof(params));

	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}

/**
 * dwc3_gadget_start_config - configure ep resources
 * @dep: endpoint that is being enabled
 *
 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
 * completion, it will set Transfer Resource for all available endpoints.
 *
 * The assignment of transfer resources cannot perfectly follow the data book
 * due to the fact that the controller driver does not have all knowledge of
 * the configuration in advance. It is given this information piecemeal by the
 * composite gadget framework after every SET_CONFIGURATION and
 * SET_INTERFACE. Trying to follow the databook programming model in this
 * scenario can cause errors, for two reasons:
 *
 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
 * incorrect in the scenario of multiple interfaces.
 *
 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
 * endpoint on alt setting (8.1.6).
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this setting
 * will stay persistent until either a core reset or hibernation. So whenever
 * we do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled but is
 * triggered only when called for EP0-out, which always happens first, and
 * which should only happen in one of the above conditions.
 */
static int dwc3_gadget_start_config(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc;
	u32 cmd;
	int i;
	int ret;

	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;
	dwc = dep->dwc;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dep = dwc->eps[i];

		if (!dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(dep);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	params.param0 |= action;
	if (action == DWC3_DEPCFG_ACTION_RESTORE)
		params.param2 |= dep->saved_state;

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
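	 * For example, USB endpoint 0x81 (EP1 IN) becomes physical endpoint 3
	 * and USB endpoint 0x02 (EP2 OUT) becomes physical endpoint 4, i.e.
	 * physical number = (USB endpoint number << 1) | direction.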
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}

/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @action: one of INIT, MODIFY or RESTORE
 *
 * Caller should take care of locking. Execute all necessary commands to
 * initialize a HW endpoint so it can be used by a gadget driver.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;

	u32 reg;
	int ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dep, action);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
665 */ 666 if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) || 667 usb_endpoint_xfer_int(desc)) { 668 struct dwc3_gadget_ep_cmd_params params; 669 struct dwc3_trb *trb; 670 dma_addr_t trb_dma; 671 u32 cmd; 672 673 memset(¶ms, 0, sizeof(params)); 674 trb = &dep->trb_pool[0]; 675 trb_dma = dwc3_trb_dma_offset(dep, trb); 676 677 params.param0 = upper_32_bits(trb_dma); 678 params.param1 = lower_32_bits(trb_dma); 679 680 cmd = DWC3_DEPCMD_STARTTRANSFER; 681 682 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 683 if (ret < 0) 684 return ret; 685 } 686 687 out: 688 trace_dwc3_gadget_ep_enable(dep); 689 690 return 0; 691 } 692 693 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, 694 bool interrupt); 695 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 696 { 697 struct dwc3_request *req; 698 699 dwc3_stop_active_transfer(dep, true, false); 700 701 /* - giveback all requests to gadget driver */ 702 while (!list_empty(&dep->started_list)) { 703 req = next_request(&dep->started_list); 704 705 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 706 } 707 708 while (!list_empty(&dep->pending_list)) { 709 req = next_request(&dep->pending_list); 710 711 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 712 } 713 714 while (!list_empty(&dep->cancelled_list)) { 715 req = next_request(&dep->cancelled_list); 716 717 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 718 } 719 } 720 721 /** 722 * __dwc3_gadget_ep_disable - disables a hw endpoint 723 * @dep: the endpoint to disable 724 * 725 * This function undoes what __dwc3_gadget_ep_enable did and also removes 726 * requests which are currently being processed by the hardware and those which 727 * are not yet scheduled. 728 * 729 * Caller should take care of locking. 730 */ 731 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 732 { 733 struct dwc3 *dwc = dep->dwc; 734 u32 reg; 735 736 trace_dwc3_gadget_ep_disable(dep); 737 738 dwc3_remove_requests(dwc, dep); 739 740 /* make sure HW endpoint isn't stalled */ 741 if (dep->flags & DWC3_EP_STALL) 742 __dwc3_gadget_ep_set_halt(dep, 0, false); 743 744 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 745 reg &= ~DWC3_DALEPENA_EP(dep->number); 746 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 747 748 dep->stream_capable = false; 749 dep->type = 0; 750 dep->flags = 0; 751 752 /* Clear out the ep descriptors for non-ep0 */ 753 if (dep->number > 1) { 754 dep->endpoint.comp_desc = NULL; 755 dep->endpoint.desc = NULL; 756 } 757 758 return 0; 759 } 760 761 /* -------------------------------------------------------------------------- */ 762 763 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 764 const struct usb_endpoint_descriptor *desc) 765 { 766 return -EINVAL; 767 } 768 769 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 770 { 771 return -EINVAL; 772 } 773 774 /* -------------------------------------------------------------------------- */ 775 776 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 777 const struct usb_endpoint_descriptor *desc) 778 { 779 struct dwc3_ep *dep; 780 struct dwc3 *dwc; 781 unsigned long flags; 782 int ret; 783 784 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 785 pr_debug("dwc3: invalid parameters\n"); 786 return -EINVAL; 787 } 788 789 if (!desc->wMaxPacketSize) { 790 pr_debug("dwc3: missing wMaxPacketSize\n"); 791 return -EINVAL; 792 } 793 794 dep = to_dwc3_ep(ep); 795 dwc = dep->dwc; 796 797 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 798 "%s is already enabled\n", 799 dep->name)) 800 return 0; 801 802 
spin_lock_irqsave(&dwc->lock, flags); 803 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 804 spin_unlock_irqrestore(&dwc->lock, flags); 805 806 return ret; 807 } 808 809 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 810 { 811 struct dwc3_ep *dep; 812 struct dwc3 *dwc; 813 unsigned long flags; 814 int ret; 815 816 if (!ep) { 817 pr_debug("dwc3: invalid parameters\n"); 818 return -EINVAL; 819 } 820 821 dep = to_dwc3_ep(ep); 822 dwc = dep->dwc; 823 824 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 825 "%s is already disabled\n", 826 dep->name)) 827 return 0; 828 829 spin_lock_irqsave(&dwc->lock, flags); 830 ret = __dwc3_gadget_ep_disable(dep); 831 spin_unlock_irqrestore(&dwc->lock, flags); 832 833 return ret; 834 } 835 836 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 837 gfp_t gfp_flags) 838 { 839 struct dwc3_request *req; 840 struct dwc3_ep *dep = to_dwc3_ep(ep); 841 842 req = kzalloc(sizeof(*req), gfp_flags); 843 if (!req) 844 return NULL; 845 846 req->direction = dep->direction; 847 req->epnum = dep->number; 848 req->dep = dep; 849 req->status = DWC3_REQUEST_STATUS_UNKNOWN; 850 851 trace_dwc3_alloc_request(req); 852 853 return &req->request; 854 } 855 856 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 857 struct usb_request *request) 858 { 859 struct dwc3_request *req = to_dwc3_request(request); 860 861 trace_dwc3_free_request(req); 862 kfree(req); 863 } 864 865 /** 866 * dwc3_ep_prev_trb - returns the previous TRB in the ring 867 * @dep: The endpoint with the TRB ring 868 * @index: The index of the current TRB in the ring 869 * 870 * Returns the TRB prior to the one pointed to by the index. If the 871 * index is 0, we will wrap backwards, skip the link TRB, and return 872 * the one just before that. 873 */ 874 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) 875 { 876 u8 tmp = index; 877 878 if (!tmp) 879 tmp = DWC3_TRB_NUM - 1; 880 881 return &dep->trb_pool[tmp - 1]; 882 } 883 884 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) 885 { 886 struct dwc3_trb *tmp; 887 u8 trbs_left; 888 889 /* 890 * If enqueue & dequeue are equal than it is either full or empty. 891 * 892 * One way to know for sure is if the TRB right before us has HWO bit 893 * set or not. If it has, then we're definitely full and can't fit any 894 * more transfers in our ring. 
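	 *
	 * Otherwise the free space is the distance from enqueue to dequeue,
	 * modulo the ring size, minus one slot when the free region wraps past
	 * the link TRB at the end of the ring. For example, with DWC3_TRB_NUM
	 * of 256, enqueue == 10 and dequeue == 4 gives (4 - 10) & 255 = 250,
	 * minus 1 for the link TRB, i.e. 249 usable TRBs.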
895 */ 896 if (dep->trb_enqueue == dep->trb_dequeue) { 897 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 898 if (tmp->ctrl & DWC3_TRB_CTRL_HWO) 899 return 0; 900 901 return DWC3_TRB_NUM - 1; 902 } 903 904 trbs_left = dep->trb_dequeue - dep->trb_enqueue; 905 trbs_left &= (DWC3_TRB_NUM - 1); 906 907 if (dep->trb_dequeue < dep->trb_enqueue) 908 trbs_left--; 909 910 return trbs_left; 911 } 912 913 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, 914 dma_addr_t dma, unsigned length, unsigned chain, unsigned node, 915 unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt) 916 { 917 struct dwc3 *dwc = dep->dwc; 918 struct usb_gadget *gadget = &dwc->gadget; 919 enum usb_device_speed speed = gadget->speed; 920 921 trb->size = DWC3_TRB_SIZE_LENGTH(length); 922 trb->bpl = lower_32_bits(dma); 923 trb->bph = upper_32_bits(dma); 924 925 switch (usb_endpoint_type(dep->endpoint.desc)) { 926 case USB_ENDPOINT_XFER_CONTROL: 927 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 928 break; 929 930 case USB_ENDPOINT_XFER_ISOC: 931 if (!node) { 932 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 933 934 /* 935 * USB Specification 2.0 Section 5.9.2 states that: "If 936 * there is only a single transaction in the microframe, 937 * only a DATA0 data packet PID is used. If there are 938 * two transactions per microframe, DATA1 is used for 939 * the first transaction data packet and DATA0 is used 940 * for the second transaction data packet. If there are 941 * three transactions per microframe, DATA2 is used for 942 * the first transaction data packet, DATA1 is used for 943 * the second, and DATA0 is used for the third." 944 * 945 * IOW, we should satisfy the following cases: 946 * 947 * 1) length <= maxpacket 948 * - DATA0 949 * 950 * 2) maxpacket < length <= (2 * maxpacket) 951 * - DATA1, DATA0 952 * 953 * 3) (2 * maxpacket) < length <= (3 * maxpacket) 954 * - DATA2, DATA1, DATA0 955 */ 956 if (speed == USB_SPEED_HIGH) { 957 struct usb_ep *ep = &dep->endpoint; 958 unsigned int mult = 2; 959 unsigned int maxp = usb_endpoint_maxp(ep->desc); 960 961 if (length <= (2 * maxp)) 962 mult--; 963 964 if (length <= maxp) 965 mult--; 966 967 trb->size |= DWC3_TRB_SIZE_PCM1(mult); 968 } 969 } else { 970 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 971 } 972 973 /* always enable Interrupt on Missed ISOC */ 974 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 975 break; 976 977 case USB_ENDPOINT_XFER_BULK: 978 case USB_ENDPOINT_XFER_INT: 979 trb->ctrl = DWC3_TRBCTL_NORMAL; 980 break; 981 default: 982 /* 983 * This is only possible with faulty memory because we 984 * checked it already :) 985 */ 986 dev_WARN(dwc->dev, "Unknown endpoint type %d\n", 987 usb_endpoint_type(dep->endpoint.desc)); 988 } 989 990 /* 991 * Enable Continue on Short Packet 992 * when endpoint is not a stream capable 993 */ 994 if (usb_endpoint_dir_out(dep->endpoint.desc)) { 995 if (!dep->stream_capable) 996 trb->ctrl |= DWC3_TRB_CTRL_CSP; 997 998 if (short_not_ok) 999 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 1000 } 1001 1002 if ((!no_interrupt && !chain) || 1003 (dwc3_calc_trbs_left(dep) == 1)) 1004 trb->ctrl |= DWC3_TRB_CTRL_IOC; 1005 1006 if (chain) 1007 trb->ctrl |= DWC3_TRB_CTRL_CHN; 1008 1009 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 1010 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); 1011 1012 trb->ctrl |= DWC3_TRB_CTRL_HWO; 1013 1014 dwc3_ep_inc_enq(dep); 1015 1016 trace_dwc3_prepare_trb(dep, trb); 1017 } 1018 1019 /** 1020 * dwc3_prepare_one_trb - setup one TRB from one request 1021 * @dep: endpoint for which this 
request is prepared 1022 * @req: dwc3_request pointer 1023 * @chain: should this TRB be chained to the next? 1024 * @node: only for isochronous endpoints. First TRB needs different type. 1025 */ 1026 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 1027 struct dwc3_request *req, unsigned chain, unsigned node) 1028 { 1029 struct dwc3_trb *trb; 1030 unsigned int length; 1031 dma_addr_t dma; 1032 unsigned stream_id = req->request.stream_id; 1033 unsigned short_not_ok = req->request.short_not_ok; 1034 unsigned no_interrupt = req->request.no_interrupt; 1035 1036 if (req->request.num_sgs > 0) { 1037 length = sg_dma_len(req->start_sg); 1038 dma = sg_dma_address(req->start_sg); 1039 } else { 1040 length = req->request.length; 1041 dma = req->request.dma; 1042 } 1043 1044 trb = &dep->trb_pool[dep->trb_enqueue]; 1045 1046 if (!req->trb) { 1047 dwc3_gadget_move_started_request(req); 1048 req->trb = trb; 1049 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 1050 } 1051 1052 req->num_trbs++; 1053 1054 __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, 1055 stream_id, short_not_ok, no_interrupt); 1056 } 1057 1058 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, 1059 struct dwc3_request *req) 1060 { 1061 struct scatterlist *sg = req->start_sg; 1062 struct scatterlist *s; 1063 int i; 1064 1065 unsigned int remaining = req->request.num_mapped_sgs 1066 - req->num_queued_sgs; 1067 1068 for_each_sg(sg, s, remaining, i) { 1069 unsigned int length = req->request.length; 1070 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1071 unsigned int rem = length % maxp; 1072 unsigned chain = true; 1073 1074 /* 1075 * IOMMU driver is coalescing the list of sgs which shares a 1076 * page boundary into one and giving it to USB driver. With 1077 * this the number of sgs mapped is not equal to the number of 1078 * sgs passed. So mark the chain bit to false if it isthe last 1079 * mapped sg. 1080 */ 1081 if (i == remaining - 1) 1082 chain = false; 1083 1084 if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { 1085 struct dwc3 *dwc = dep->dwc; 1086 struct dwc3_trb *trb; 1087 1088 req->needs_extra_trb = true; 1089 1090 /* prepare normal TRB */ 1091 dwc3_prepare_one_trb(dep, req, true, i); 1092 1093 /* Now prepare one extra TRB to align transfer size */ 1094 trb = &dep->trb_pool[dep->trb_enqueue]; 1095 req->num_trbs++; 1096 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 1097 maxp - rem, false, 1, 1098 req->request.stream_id, 1099 req->request.short_not_ok, 1100 req->request.no_interrupt); 1101 } else { 1102 dwc3_prepare_one_trb(dep, req, chain, i); 1103 } 1104 1105 /* 1106 * There can be a situation where all sgs in sglist are not 1107 * queued because of insufficient trb number. 
To handle this 1108 * case, update start_sg to next sg to be queued, so that 1109 * we have free trbs we can continue queuing from where we 1110 * previously stopped 1111 */ 1112 if (chain) 1113 req->start_sg = sg_next(s); 1114 1115 req->num_queued_sgs++; 1116 1117 if (!dwc3_calc_trbs_left(dep)) 1118 break; 1119 } 1120 } 1121 1122 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, 1123 struct dwc3_request *req) 1124 { 1125 unsigned int length = req->request.length; 1126 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1127 unsigned int rem = length % maxp; 1128 1129 if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { 1130 struct dwc3 *dwc = dep->dwc; 1131 struct dwc3_trb *trb; 1132 1133 req->needs_extra_trb = true; 1134 1135 /* prepare normal TRB */ 1136 dwc3_prepare_one_trb(dep, req, true, 0); 1137 1138 /* Now prepare one extra TRB to align transfer size */ 1139 trb = &dep->trb_pool[dep->trb_enqueue]; 1140 req->num_trbs++; 1141 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, 1142 false, 1, req->request.stream_id, 1143 req->request.short_not_ok, 1144 req->request.no_interrupt); 1145 } else if (req->request.zero && req->request.length && 1146 (IS_ALIGNED(req->request.length, maxp))) { 1147 struct dwc3 *dwc = dep->dwc; 1148 struct dwc3_trb *trb; 1149 1150 req->needs_extra_trb = true; 1151 1152 /* prepare normal TRB */ 1153 dwc3_prepare_one_trb(dep, req, true, 0); 1154 1155 /* Now prepare one extra TRB to handle ZLP */ 1156 trb = &dep->trb_pool[dep->trb_enqueue]; 1157 req->num_trbs++; 1158 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, 1159 false, 1, req->request.stream_id, 1160 req->request.short_not_ok, 1161 req->request.no_interrupt); 1162 } else { 1163 dwc3_prepare_one_trb(dep, req, false, 0); 1164 } 1165 } 1166 1167 /* 1168 * dwc3_prepare_trbs - setup TRBs from requests 1169 * @dep: endpoint for which requests are being prepared 1170 * 1171 * The function goes through the requests list and sets up TRBs for the 1172 * transfers. The function returns once there are no more TRBs available or 1173 * it runs out of requests. 1174 */ 1175 static void dwc3_prepare_trbs(struct dwc3_ep *dep) 1176 { 1177 struct dwc3_request *req, *n; 1178 1179 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 1180 1181 /* 1182 * We can get in a situation where there's a request in the started list 1183 * but there weren't enough TRBs to fully kick it in the first time 1184 * around, so it has been waiting for more TRBs to be freed up. 1185 * 1186 * In that case, we should check if we have a request with pending_sgs 1187 * in the started list and prepare TRBs for that request first, 1188 * otherwise we will prepare TRBs completely out of order and that will 1189 * break things. 
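	 *
	 * In other words, a started request that still has pending sgs must
	 * get its remaining TRBs queued before anything from the pending list,
	 * since the controller consumes TRBs strictly in ring order.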
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3 *dwc = dep->dwc;
		int ret;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
						    dep->direction);
		if (ret)
			return;

		req->sg = req->request.sg;
		req->start_sg = req->sg;
		req->num_queued_sgs = 0;
		req->num_pending_sgs = req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	int starting;
	int ret;
	u32 cmd;

	if (!dwc3_calc_trbs_left(dep))
		return 0;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		if (req->trb)
			memset(req->trb, 0, sizeof(struct dwc3_trb));
		dwc3_gadget_del_and_unmap_request(dep, req, ret);
		return ret;
	}

	return 0;
}

static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

/**
 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
 * @dep: isoc endpoint
 *
 * This function tests for the correct combination of BIT[15:14] from the
 * 16-bit microframe number reported by the XferNotReady event for the future
 * frame number to start the isoc transfer.
 *
 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
 * XferNotReady event are invalid. The driver uses this number to schedule the
 * isochronous transfer and passes it to the START TRANSFER command. Because
 * this number is invalid, the command may fail. If BIT[15:14] matches the
 * internal 16-bit microframe, the START TRANSFER command will pass and the
 * transfer will start at the scheduled time. If it is off by 1, the command
 * will still pass, but the transfer will start 2 seconds in the future. For
 * all other conditions, the START TRANSFER command will fail with bus-expiry.
1301 * 1302 * In order to workaround this issue, we can test for the correct combination of 1303 * BIT[15:14] by sending START TRANSFER commands with different values of 1304 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart 1305 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status. 1306 * As the result, within the 4 possible combinations for BIT[15:14], there will 1307 * be 2 successful and 2 failure START COMMAND status. One of the 2 successful 1308 * command status will result in a 2-second delay start. The smaller BIT[15:14] 1309 * value is the correct combination. 1310 * 1311 * Since there are only 4 outcomes and the results are ordered, we can simply 1312 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to 1313 * deduce the smaller successful combination. 1314 * 1315 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01 1316 * of BIT[15:14]. The correct combination is as follow: 1317 * 1318 * if test0 fails and test1 passes, BIT[15:14] is 'b01 1319 * if test0 fails and test1 fails, BIT[15:14] is 'b10 1320 * if test0 passes and test1 fails, BIT[15:14] is 'b11 1321 * if test0 passes and test1 passes, BIT[15:14] is 'b00 1322 * 1323 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN 1324 * endpoints. 1325 */ 1326 static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep) 1327 { 1328 int cmd_status = 0; 1329 bool test0; 1330 bool test1; 1331 1332 while (dep->combo_num < 2) { 1333 struct dwc3_gadget_ep_cmd_params params; 1334 u32 test_frame_number; 1335 u32 cmd; 1336 1337 /* 1338 * Check if we can start isoc transfer on the next interval or 1339 * 4 uframes in the future with BIT[15:14] as dep->combo_num 1340 */ 1341 test_frame_number = dep->frame_number & 0x3fff; 1342 test_frame_number |= dep->combo_num << 14; 1343 test_frame_number += max_t(u32, 4, dep->interval); 1344 1345 params.param0 = upper_32_bits(dep->dwc->bounce_addr); 1346 params.param1 = lower_32_bits(dep->dwc->bounce_addr); 1347 1348 cmd = DWC3_DEPCMD_STARTTRANSFER; 1349 cmd |= DWC3_DEPCMD_PARAM(test_frame_number); 1350 cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1351 1352 /* Redo if some other failure beside bus-expiry is received */ 1353 if (cmd_status && cmd_status != -EAGAIN) { 1354 dep->start_cmd_status = 0; 1355 dep->combo_num = 0; 1356 return 0; 1357 } 1358 1359 /* Store the first test status */ 1360 if (dep->combo_num == 0) 1361 dep->start_cmd_status = cmd_status; 1362 1363 dep->combo_num++; 1364 1365 /* 1366 * End the transfer if the START_TRANSFER command is successful 1367 * to wait for the next XferNotReady to test the command again 1368 */ 1369 if (cmd_status == 0) { 1370 dwc3_stop_active_transfer(dep, true, true); 1371 return 0; 1372 } 1373 } 1374 1375 /* test0 and test1 are both completed at this point */ 1376 test0 = (dep->start_cmd_status == 0); 1377 test1 = (cmd_status == 0); 1378 1379 if (!test0 && test1) 1380 dep->combo_num = 1; 1381 else if (!test0 && !test1) 1382 dep->combo_num = 2; 1383 else if (test0 && !test1) 1384 dep->combo_num = 3; 1385 else if (test0 && test1) 1386 dep->combo_num = 0; 1387 1388 dep->frame_number &= 0x3fff; 1389 dep->frame_number |= dep->combo_num << 14; 1390 dep->frame_number += max_t(u32, 4, dep->interval); 1391 1392 /* Reinitialize test variables */ 1393 dep->start_cmd_status = 0; 1394 dep->combo_num = 0; 1395 1396 return __dwc3_gadget_kick_transfer(dep); 1397 } 1398 1399 static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep) 1400 { 1401 
struct dwc3 *dwc = dep->dwc; 1402 int ret; 1403 int i; 1404 1405 if (list_empty(&dep->pending_list)) { 1406 dep->flags |= DWC3_EP_PENDING_REQUEST; 1407 return -EAGAIN; 1408 } 1409 1410 if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) && 1411 (dwc->revision <= DWC3_USB31_REVISION_160A || 1412 (dwc->revision == DWC3_USB31_REVISION_170A && 1413 dwc->version_type >= DWC31_VERSIONTYPE_EA01 && 1414 dwc->version_type <= DWC31_VERSIONTYPE_EA06))) { 1415 1416 if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction) 1417 return dwc3_gadget_start_isoc_quirk(dep); 1418 } 1419 1420 for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) { 1421 dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1); 1422 1423 ret = __dwc3_gadget_kick_transfer(dep); 1424 if (ret != -EAGAIN) 1425 break; 1426 } 1427 1428 return ret; 1429 } 1430 1431 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1432 { 1433 struct dwc3 *dwc = dep->dwc; 1434 1435 if (!dep->endpoint.desc) { 1436 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", 1437 dep->name); 1438 return -ESHUTDOWN; 1439 } 1440 1441 if (WARN(req->dep != dep, "request %pK belongs to '%s'\n", 1442 &req->request, req->dep->name)) 1443 return -EINVAL; 1444 1445 if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED, 1446 "%s: request %pK already in flight\n", 1447 dep->name, &req->request)) 1448 return -EINVAL; 1449 1450 pm_runtime_get(dwc->dev); 1451 1452 req->request.actual = 0; 1453 req->request.status = -EINPROGRESS; 1454 1455 trace_dwc3_ep_queue(req); 1456 1457 list_add_tail(&req->list, &dep->pending_list); 1458 req->status = DWC3_REQUEST_STATUS_QUEUED; 1459 1460 /* Start the transfer only after the END_TRANSFER is completed */ 1461 if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { 1462 dep->flags |= DWC3_EP_DELAY_START; 1463 return 0; 1464 } 1465 1466 /* 1467 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must 1468 * wait for a XferNotReady event so we will know what's the current 1469 * (micro-)frame number. 1470 * 1471 * Without this trick, we are very, very likely gonna get Bus Expiry 1472 * errors which will force us issue EndTransfer command. 1473 */ 1474 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1475 if (!(dep->flags & DWC3_EP_PENDING_REQUEST) && 1476 !(dep->flags & DWC3_EP_TRANSFER_STARTED)) 1477 return 0; 1478 1479 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { 1480 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) { 1481 return __dwc3_gadget_start_isoc(dep); 1482 } 1483 } 1484 } 1485 1486 return __dwc3_gadget_kick_transfer(dep); 1487 } 1488 1489 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1490 gfp_t gfp_flags) 1491 { 1492 struct dwc3_request *req = to_dwc3_request(request); 1493 struct dwc3_ep *dep = to_dwc3_ep(ep); 1494 struct dwc3 *dwc = dep->dwc; 1495 1496 unsigned long flags; 1497 1498 int ret; 1499 1500 spin_lock_irqsave(&dwc->lock, flags); 1501 ret = __dwc3_gadget_ep_queue(dep, req); 1502 spin_unlock_irqrestore(&dwc->lock, flags); 1503 1504 return ret; 1505 } 1506 1507 static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req) 1508 { 1509 int i; 1510 1511 /* 1512 * If request was already started, this means we had to 1513 * stop the transfer. With that we also need to ignore 1514 * all TRBs used by the request, however TRBs can only 1515 * be modified after completion of END_TRANSFER 1516 * command. 
So what we do here is that we wait for 1517 * END_TRANSFER completion and only after that, we jump 1518 * over TRBs by clearing HWO and incrementing dequeue 1519 * pointer. 1520 */ 1521 for (i = 0; i < req->num_trbs; i++) { 1522 struct dwc3_trb *trb; 1523 1524 trb = &dep->trb_pool[dep->trb_dequeue]; 1525 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 1526 dwc3_ep_inc_deq(dep); 1527 } 1528 1529 req->num_trbs = 0; 1530 } 1531 1532 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep) 1533 { 1534 struct dwc3_request *req; 1535 struct dwc3_request *tmp; 1536 1537 list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) { 1538 dwc3_gadget_ep_skip_trbs(dep, req); 1539 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1540 } 1541 } 1542 1543 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1544 struct usb_request *request) 1545 { 1546 struct dwc3_request *req = to_dwc3_request(request); 1547 struct dwc3_request *r = NULL; 1548 1549 struct dwc3_ep *dep = to_dwc3_ep(ep); 1550 struct dwc3 *dwc = dep->dwc; 1551 1552 unsigned long flags; 1553 int ret = 0; 1554 1555 trace_dwc3_ep_dequeue(req); 1556 1557 spin_lock_irqsave(&dwc->lock, flags); 1558 1559 list_for_each_entry(r, &dep->pending_list, list) { 1560 if (r == req) 1561 break; 1562 } 1563 1564 if (r != req) { 1565 list_for_each_entry(r, &dep->started_list, list) { 1566 if (r == req) 1567 break; 1568 } 1569 if (r == req) { 1570 /* wait until it is processed */ 1571 dwc3_stop_active_transfer(dep, true, true); 1572 1573 if (!r->trb) 1574 goto out0; 1575 1576 dwc3_gadget_move_cancelled_request(req); 1577 if (dep->flags & DWC3_EP_TRANSFER_STARTED) 1578 goto out0; 1579 else 1580 goto out1; 1581 } 1582 dev_err(dwc->dev, "request %pK was not queued to %s\n", 1583 request, ep->name); 1584 ret = -EINVAL; 1585 goto out0; 1586 } 1587 1588 out1: 1589 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1590 1591 out0: 1592 spin_unlock_irqrestore(&dwc->lock, flags); 1593 1594 return ret; 1595 } 1596 1597 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1598 { 1599 struct dwc3_gadget_ep_cmd_params params; 1600 struct dwc3 *dwc = dep->dwc; 1601 int ret; 1602 1603 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1604 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1605 return -EINVAL; 1606 } 1607 1608 memset(¶ms, 0x00, sizeof(params)); 1609 1610 if (value) { 1611 struct dwc3_trb *trb; 1612 1613 unsigned transfer_in_flight; 1614 unsigned started; 1615 1616 if (dep->number > 1) 1617 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1618 else 1619 trb = &dwc->ep0_trb[dep->trb_enqueue]; 1620 1621 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; 1622 started = !list_empty(&dep->started_list); 1623 1624 if (!protocol && ((dep->direction && transfer_in_flight) || 1625 (!dep->direction && started))) { 1626 return -EAGAIN; 1627 } 1628 1629 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, 1630 ¶ms); 1631 if (ret) 1632 dev_err(dwc->dev, "failed to set STALL on %s\n", 1633 dep->name); 1634 else 1635 dep->flags |= DWC3_EP_STALL; 1636 } else { 1637 1638 ret = dwc3_send_clear_stall_ep_cmd(dep); 1639 if (ret) 1640 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1641 dep->name); 1642 else 1643 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1644 } 1645 1646 return ret; 1647 } 1648 1649 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1650 { 1651 struct dwc3_ep *dep = to_dwc3_ep(ep); 1652 struct dwc3 *dwc = dep->dwc; 1653 1654 unsigned long flags; 1655 1656 int ret; 1657 1658 spin_lock_irqsave(&dwc->lock, 
flags); 1659 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1660 spin_unlock_irqrestore(&dwc->lock, flags); 1661 1662 return ret; 1663 } 1664 1665 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1666 { 1667 struct dwc3_ep *dep = to_dwc3_ep(ep); 1668 struct dwc3 *dwc = dep->dwc; 1669 unsigned long flags; 1670 int ret; 1671 1672 spin_lock_irqsave(&dwc->lock, flags); 1673 dep->flags |= DWC3_EP_WEDGE; 1674 1675 if (dep->number == 0 || dep->number == 1) 1676 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1677 else 1678 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1679 spin_unlock_irqrestore(&dwc->lock, flags); 1680 1681 return ret; 1682 } 1683 1684 /* -------------------------------------------------------------------------- */ 1685 1686 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1687 .bLength = USB_DT_ENDPOINT_SIZE, 1688 .bDescriptorType = USB_DT_ENDPOINT, 1689 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1690 }; 1691 1692 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1693 .enable = dwc3_gadget_ep0_enable, 1694 .disable = dwc3_gadget_ep0_disable, 1695 .alloc_request = dwc3_gadget_ep_alloc_request, 1696 .free_request = dwc3_gadget_ep_free_request, 1697 .queue = dwc3_gadget_ep0_queue, 1698 .dequeue = dwc3_gadget_ep_dequeue, 1699 .set_halt = dwc3_gadget_ep0_set_halt, 1700 .set_wedge = dwc3_gadget_ep_set_wedge, 1701 }; 1702 1703 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1704 .enable = dwc3_gadget_ep_enable, 1705 .disable = dwc3_gadget_ep_disable, 1706 .alloc_request = dwc3_gadget_ep_alloc_request, 1707 .free_request = dwc3_gadget_ep_free_request, 1708 .queue = dwc3_gadget_ep_queue, 1709 .dequeue = dwc3_gadget_ep_dequeue, 1710 .set_halt = dwc3_gadget_ep_set_halt, 1711 .set_wedge = dwc3_gadget_ep_set_wedge, 1712 }; 1713 1714 /* -------------------------------------------------------------------------- */ 1715 1716 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1717 { 1718 struct dwc3 *dwc = gadget_to_dwc(g); 1719 1720 return __dwc3_gadget_get_frame(dwc); 1721 } 1722 1723 static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1724 { 1725 int retries; 1726 1727 int ret; 1728 u32 reg; 1729 1730 u8 link_state; 1731 1732 /* 1733 * According to the Databook Remote wakeup request should 1734 * be issued only when the device is in early suspend state. 1735 * 1736 * We can check that via USB Link State bits in DSTS register. 
1737 */ 1738 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1739 1740 link_state = DWC3_DSTS_USBLNKST(reg); 1741 1742 switch (link_state) { 1743 case DWC3_LINK_STATE_RESET: 1744 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1745 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1746 case DWC3_LINK_STATE_RESUME: 1747 break; 1748 default: 1749 return -EINVAL; 1750 } 1751 1752 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1753 if (ret < 0) { 1754 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1755 return ret; 1756 } 1757 1758 /* Recent versions do this automatically */ 1759 if (dwc->revision < DWC3_REVISION_194A) { 1760 /* write zeroes to Link Change Request */ 1761 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1762 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1763 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1764 } 1765 1766 /* poll until Link State changes to ON */ 1767 retries = 20000; 1768 1769 while (retries--) { 1770 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1771 1772 /* in HS, means ON */ 1773 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1774 break; 1775 } 1776 1777 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1778 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1779 return -EINVAL; 1780 } 1781 1782 return 0; 1783 } 1784 1785 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1786 { 1787 struct dwc3 *dwc = gadget_to_dwc(g); 1788 unsigned long flags; 1789 int ret; 1790 1791 spin_lock_irqsave(&dwc->lock, flags); 1792 ret = __dwc3_gadget_wakeup(dwc); 1793 spin_unlock_irqrestore(&dwc->lock, flags); 1794 1795 return ret; 1796 } 1797 1798 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1799 int is_selfpowered) 1800 { 1801 struct dwc3 *dwc = gadget_to_dwc(g); 1802 unsigned long flags; 1803 1804 spin_lock_irqsave(&dwc->lock, flags); 1805 g->is_selfpowered = !!is_selfpowered; 1806 spin_unlock_irqrestore(&dwc->lock, flags); 1807 1808 return 0; 1809 } 1810 1811 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1812 { 1813 u32 reg; 1814 u32 timeout = 500; 1815 1816 if (pm_runtime_suspended(dwc->dev)) 1817 return 0; 1818 1819 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1820 if (is_on) { 1821 if (dwc->revision <= DWC3_REVISION_187A) { 1822 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1823 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1824 } 1825 1826 if (dwc->revision >= DWC3_REVISION_194A) 1827 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1828 reg |= DWC3_DCTL_RUN_STOP; 1829 1830 if (dwc->has_hibernation) 1831 reg |= DWC3_DCTL_KEEP_CONNECT; 1832 1833 dwc->pullups_connected = true; 1834 } else { 1835 reg &= ~DWC3_DCTL_RUN_STOP; 1836 1837 if (dwc->has_hibernation && !suspend) 1838 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1839 1840 dwc->pullups_connected = false; 1841 } 1842 1843 dwc3_gadget_dctl_write_safe(dwc, reg); 1844 1845 do { 1846 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1847 reg &= DWC3_DSTS_DEVCTRLHLT; 1848 } while (--timeout && !(!is_on ^ !reg)); 1849 1850 if (!timeout) 1851 return -ETIMEDOUT; 1852 1853 return 0; 1854 } 1855 1856 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1857 { 1858 struct dwc3 *dwc = gadget_to_dwc(g); 1859 unsigned long flags; 1860 int ret; 1861 1862 is_on = !!is_on; 1863 1864 /* 1865 * Per databook, when we want to stop the gadget, if a control transfer 1866 * is still in process, complete it and get the core into setup phase. 
1867 */ 1868 if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { 1869 reinit_completion(&dwc->ep0_in_setup); 1870 1871 ret = wait_for_completion_timeout(&dwc->ep0_in_setup, 1872 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); 1873 if (ret == 0) { 1874 dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); 1875 return -ETIMEDOUT; 1876 } 1877 } 1878 1879 spin_lock_irqsave(&dwc->lock, flags); 1880 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1881 spin_unlock_irqrestore(&dwc->lock, flags); 1882 1883 return ret; 1884 } 1885 1886 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1887 { 1888 u32 reg; 1889 1890 /* Enable all but Start and End of Frame IRQs */ 1891 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1892 DWC3_DEVTEN_EVNTOVERFLOWEN | 1893 DWC3_DEVTEN_CMDCMPLTEN | 1894 DWC3_DEVTEN_ERRTICERREN | 1895 DWC3_DEVTEN_WKUPEVTEN | 1896 DWC3_DEVTEN_CONNECTDONEEN | 1897 DWC3_DEVTEN_USBRSTEN | 1898 DWC3_DEVTEN_DISCONNEVTEN); 1899 1900 if (dwc->revision < DWC3_REVISION_250A) 1901 reg |= DWC3_DEVTEN_ULSTCNGEN; 1902 1903 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1904 } 1905 1906 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1907 { 1908 /* mask all interrupts */ 1909 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1910 } 1911 1912 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1913 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1914 1915 /** 1916 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG 1917 * @dwc: pointer to our context structure 1918 * 1919 * The following looks like complex but it's actually very simple. In order to 1920 * calculate the number of packets we can burst at once on OUT transfers, we're 1921 * gonna use RxFIFO size. 1922 * 1923 * To calculate RxFIFO size we need two numbers: 1924 * MDWIDTH = size, in bits, of the internal memory bus 1925 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) 1926 * 1927 * Given these two numbers, the formula is simple: 1928 * 1929 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; 1930 * 1931 * 24 bytes is for 3x SETUP packets 1932 * 16 bytes is a clock domain crossing tolerance 1933 * 1934 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; 1935 */ 1936 static void dwc3_gadget_setup_nump(struct dwc3 *dwc) 1937 { 1938 u32 ram2_depth; 1939 u32 mdwidth; 1940 u32 nump; 1941 u32 reg; 1942 1943 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); 1944 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); 1945 1946 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; 1947 nump = min_t(u32, nump, 16); 1948 1949 /* update NumP */ 1950 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1951 reg &= ~DWC3_DCFG_NUMP_MASK; 1952 reg |= nump << DWC3_DCFG_NUMP_SHIFT; 1953 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1954 } 1955 1956 static int __dwc3_gadget_start(struct dwc3 *dwc) 1957 { 1958 struct dwc3_ep *dep; 1959 int ret = 0; 1960 u32 reg; 1961 1962 /* 1963 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if 1964 * the core supports IMOD, disable it. 1965 */ 1966 if (dwc->imod_interval) { 1967 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 1968 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 1969 } else if (dwc3_has_imod(dwc)) { 1970 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); 1971 } 1972 1973 /* 1974 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1975 * field instead of letting dwc3 itself calculate that automatically. 
1976 * 1977 * This way, we maximize the chances that we'll be able to get several 1978 * bursts of data without going through any sort of endpoint throttling. 1979 */ 1980 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1981 if (dwc3_is_usb31(dwc)) 1982 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL; 1983 else 1984 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1985 1986 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1987 1988 dwc3_gadget_setup_nump(dwc); 1989 1990 /* Start with SuperSpeed Default */ 1991 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1992 1993 dep = dwc->eps[0]; 1994 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1995 if (ret) { 1996 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1997 goto err0; 1998 } 1999 2000 dep = dwc->eps[1]; 2001 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 2002 if (ret) { 2003 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2004 goto err1; 2005 } 2006 2007 /* begin to receive SETUP packets */ 2008 dwc->ep0state = EP0_SETUP_PHASE; 2009 dwc->link_state = DWC3_LINK_STATE_SS_DIS; 2010 dwc3_ep0_out_start(dwc); 2011 2012 dwc3_gadget_enable_irq(dwc); 2013 2014 return 0; 2015 2016 err1: 2017 __dwc3_gadget_ep_disable(dwc->eps[0]); 2018 2019 err0: 2020 return ret; 2021 } 2022 2023 static int dwc3_gadget_start(struct usb_gadget *g, 2024 struct usb_gadget_driver *driver) 2025 { 2026 struct dwc3 *dwc = gadget_to_dwc(g); 2027 unsigned long flags; 2028 int ret = 0; 2029 int irq; 2030 2031 irq = dwc->irq_gadget; 2032 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 2033 IRQF_SHARED, "dwc3", dwc->ev_buf); 2034 if (ret) { 2035 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 2036 irq, ret); 2037 goto err0; 2038 } 2039 2040 spin_lock_irqsave(&dwc->lock, flags); 2041 if (dwc->gadget_driver) { 2042 dev_err(dwc->dev, "%s is already bound to %s\n", 2043 dwc->gadget.name, 2044 dwc->gadget_driver->driver.name); 2045 ret = -EBUSY; 2046 goto err1; 2047 } 2048 2049 dwc->gadget_driver = driver; 2050 2051 if (pm_runtime_active(dwc->dev)) 2052 __dwc3_gadget_start(dwc); 2053 2054 spin_unlock_irqrestore(&dwc->lock, flags); 2055 2056 return 0; 2057 2058 err1: 2059 spin_unlock_irqrestore(&dwc->lock, flags); 2060 free_irq(irq, dwc); 2061 2062 err0: 2063 return ret; 2064 } 2065 2066 static void __dwc3_gadget_stop(struct dwc3 *dwc) 2067 { 2068 dwc3_gadget_disable_irq(dwc); 2069 __dwc3_gadget_ep_disable(dwc->eps[0]); 2070 __dwc3_gadget_ep_disable(dwc->eps[1]); 2071 } 2072 2073 static int dwc3_gadget_stop(struct usb_gadget *g) 2074 { 2075 struct dwc3 *dwc = gadget_to_dwc(g); 2076 unsigned long flags; 2077 2078 spin_lock_irqsave(&dwc->lock, flags); 2079 2080 if (pm_runtime_suspended(dwc->dev)) 2081 goto out; 2082 2083 __dwc3_gadget_stop(dwc); 2084 2085 out: 2086 dwc->gadget_driver = NULL; 2087 spin_unlock_irqrestore(&dwc->lock, flags); 2088 2089 free_irq(dwc->irq_gadget, dwc->ev_buf); 2090 2091 return 0; 2092 } 2093 2094 static void dwc3_gadget_config_params(struct usb_gadget *g, 2095 struct usb_dcd_config_params *params) 2096 { 2097 struct dwc3 *dwc = gadget_to_dwc(g); 2098 2099 params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED; 2100 params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED; 2101 2102 /* Recommended BESL */ 2103 if (!dwc->dis_enblslpm_quirk) { 2104 /* 2105 * If the recommended BESL baseline is 0 or if the BESL deep is 2106 * less than 2, Microsoft's Windows 10 host usb stack will issue 2107 * a usb reset immediately after it receives the extended BOS 2108 * descriptor and the enumeration will fail. 
To maintain 2109 * compatibility with the Windows' usb stack, let's set the 2110 * recommended BESL baseline to 1 and clamp the BESL deep to be 2111 * within 2 to 15. 2112 */ 2113 params->besl_baseline = 1; 2114 if (dwc->is_utmi_l1_suspend) 2115 params->besl_deep = 2116 clamp_t(u8, dwc->hird_threshold, 2, 15); 2117 } 2118 2119 /* U1 Device exit Latency */ 2120 if (dwc->dis_u1_entry_quirk) 2121 params->bU1devExitLat = 0; 2122 else 2123 params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT; 2124 2125 /* U2 Device exit Latency */ 2126 if (dwc->dis_u2_entry_quirk) 2127 params->bU2DevExitLat = 0; 2128 else 2129 params->bU2DevExitLat = 2130 cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT); 2131 } 2132 2133 static void dwc3_gadget_set_speed(struct usb_gadget *g, 2134 enum usb_device_speed speed) 2135 { 2136 struct dwc3 *dwc = gadget_to_dwc(g); 2137 unsigned long flags; 2138 u32 reg; 2139 2140 spin_lock_irqsave(&dwc->lock, flags); 2141 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2142 reg &= ~(DWC3_DCFG_SPEED_MASK); 2143 2144 /* 2145 * WORKAROUND: DWC3 revision < 2.20a have an issue 2146 * which would cause metastability state on Run/Stop 2147 * bit if we try to force the IP to USB2-only mode. 2148 * 2149 * Because of that, we cannot configure the IP to any 2150 * speed other than the SuperSpeed 2151 * 2152 * Refers to: 2153 * 2154 * STAR#9000525659: Clock Domain Crossing on DCTL in 2155 * USB 2.0 Mode 2156 */ 2157 if (dwc->revision < DWC3_REVISION_220A && 2158 !dwc->dis_metastability_quirk) { 2159 reg |= DWC3_DCFG_SUPERSPEED; 2160 } else { 2161 switch (speed) { 2162 case USB_SPEED_LOW: 2163 reg |= DWC3_DCFG_LOWSPEED; 2164 break; 2165 case USB_SPEED_FULL: 2166 reg |= DWC3_DCFG_FULLSPEED; 2167 break; 2168 case USB_SPEED_HIGH: 2169 reg |= DWC3_DCFG_HIGHSPEED; 2170 break; 2171 case USB_SPEED_SUPER: 2172 reg |= DWC3_DCFG_SUPERSPEED; 2173 break; 2174 case USB_SPEED_SUPER_PLUS: 2175 if (dwc3_is_usb31(dwc)) 2176 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2177 else 2178 reg |= DWC3_DCFG_SUPERSPEED; 2179 break; 2180 default: 2181 dev_err(dwc->dev, "invalid speed (%d)\n", speed); 2182 2183 if (dwc->revision & DWC3_REVISION_IS_DWC31) 2184 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2185 else 2186 reg |= DWC3_DCFG_SUPERSPEED; 2187 } 2188 } 2189 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2190 2191 spin_unlock_irqrestore(&dwc->lock, flags); 2192 } 2193 2194 static const struct usb_gadget_ops dwc3_gadget_ops = { 2195 .get_frame = dwc3_gadget_get_frame, 2196 .wakeup = dwc3_gadget_wakeup, 2197 .set_selfpowered = dwc3_gadget_set_selfpowered, 2198 .pullup = dwc3_gadget_pullup, 2199 .udc_start = dwc3_gadget_start, 2200 .udc_stop = dwc3_gadget_stop, 2201 .udc_set_speed = dwc3_gadget_set_speed, 2202 .get_config_params = dwc3_gadget_config_params, 2203 }; 2204 2205 /* -------------------------------------------------------------------------- */ 2206 2207 static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep) 2208 { 2209 struct dwc3 *dwc = dep->dwc; 2210 2211 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2212 dep->endpoint.maxburst = 1; 2213 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2214 if (!dep->direction) 2215 dwc->gadget.ep0 = &dep->endpoint; 2216 2217 dep->endpoint.caps.type_control = true; 2218 2219 return 0; 2220 } 2221 2222 static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) 2223 { 2224 struct dwc3 *dwc = dep->dwc; 2225 int mdwidth; 2226 int size; 2227 2228 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2229 /* MDWIDTH is represented in bits, we need it in bytes */ 2230 mdwidth /= 8; 2231 2232 size = dwc3_readl(dwc->regs, 
			DWC3_GTXFIFOSIZ(dep->number >> 1));
	if (dwc3_is_usb31(dwc))
		size = DWC31_GTXFIFOSIZ_TXFDEP(size);
	else
		size = DWC3_GTXFIFOSIZ_TXFDEP(size);

	/* FIFO Depth is in MDWIDTH bytes. Multiply */
	size *= mdwidth;

	/*
	 * To meet performance requirements, a minimum TxFIFO size of 3x
	 * MaxPacketSize is recommended for endpoints that support burst and a
	 * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
	 * support burst. Use those numbers and we can calculate the max packet
	 * limit as below.
	 */
	if (dwc->maximum_speed >= USB_SPEED_SUPER)
		size /= 3;
	else
		size /= 2;

	usb_ep_set_maxpacket_limit(&dep->endpoint, size);

	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int mdwidth;
	int size;

	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, convert to bytes */
	mdwidth /= 8;

	/* All OUT endpoints share a single RxFIFO space */
	size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
	if (dwc3_is_usb31(dwc))
		size = DWC31_GRXFIFOSIZ_RXFDEP(size);
	else
		size = DWC3_GRXFIFOSIZ_RXFDEP(size);

	/* FIFO depth is in MDWIDTH bytes */
	size *= mdwidth;

	/*
	 * To meet performance requirements, a minimum recommended RxFIFO size
	 * is defined as follows:
	 * RxFIFO size >= (3 x MaxPacketSize) +
	 * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
	 *
	 * Then calculate the max packet limit as below.
	 */
	size -= (3 * 8) + 16;
	if (size < 0)
		size = 0;
	else
		size /= 3;

	usb_ep_set_maxpacket_limit(&dep->endpoint, size);
	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
	struct dwc3_ep *dep;
	bool direction = epnum & 1;
	int ret;
	u8 num = epnum >> 1;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return -ENOMEM;

	dep->dwc = dwc;
	dep->number = epnum;
	dep->direction = direction;
	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
	dwc->eps[epnum] = dep;
	dep->combo_num = 0;
	dep->start_cmd_status = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
			direction ? "in" : "out");

	dep->endpoint.name = dep->name;

	if (!(dep->number > 1)) {
		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
		dep->endpoint.comp_desc = NULL;
	}

	if (num == 0)
		ret = dwc3_gadget_init_control_endpoint(dep);
	else if (direction)
		ret = dwc3_gadget_init_in_endpoint(dep);
	else
		ret = dwc3_gadget_init_out_endpoint(dep);

	if (ret)
		return ret;

	dep->endpoint.caps.dir_in = direction;
	dep->endpoint.caps.dir_out = !direction;

	INIT_LIST_HEAD(&dep->pending_list);
	INIT_LIST_HEAD(&dep->started_list);
	INIT_LIST_HEAD(&dep->cancelled_list);

	return 0;
}

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < total; epnum++) {
		int ret;

		ret = dwc3_gadget_init_endpoint(dwc, epnum);
		if (ret)
			return ret;
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list. Due to that,
		 * we shouldn't do these two operations here, otherwise we
		 * would end up with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;

	/*
	 * If we're in the middle of a series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * one where the CHN bit is zero). DWC3 will also avoid clearing
	 * the HWO bit, and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * If we're dealing with an unaligned size OUT transfer (typically an
	 * OUT request whose length is not a multiple of wMaxPacketSize, which
	 * gets an extra TRB appended to keep the transfer size aligned), we
	 * will be left with one TRB pending in the ring. We need to manually
	 * clear the HWO bit from that TRB.
2449 */ 2450 2451 if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { 2452 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2453 return 1; 2454 } 2455 2456 count = trb->size & DWC3_TRB_SIZE_MASK; 2457 req->remaining += count; 2458 2459 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2460 return 1; 2461 2462 if (event->status & DEPEVT_STATUS_SHORT && !chain) 2463 return 1; 2464 2465 if ((trb->ctrl & DWC3_TRB_CTRL_IOC) || 2466 (trb->ctrl & DWC3_TRB_CTRL_LST)) 2467 return 1; 2468 2469 return 0; 2470 } 2471 2472 static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, 2473 struct dwc3_request *req, const struct dwc3_event_depevt *event, 2474 int status) 2475 { 2476 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2477 struct scatterlist *sg = req->sg; 2478 struct scatterlist *s; 2479 unsigned int pending = req->num_pending_sgs; 2480 unsigned int i; 2481 int ret = 0; 2482 2483 for_each_sg(sg, s, pending, i) { 2484 trb = &dep->trb_pool[dep->trb_dequeue]; 2485 2486 if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2487 break; 2488 2489 req->sg = sg_next(s); 2490 req->num_pending_sgs--; 2491 2492 ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req, 2493 trb, event, status, true); 2494 if (ret) 2495 break; 2496 } 2497 2498 return ret; 2499 } 2500 2501 static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, 2502 struct dwc3_request *req, const struct dwc3_event_depevt *event, 2503 int status) 2504 { 2505 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2506 2507 return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb, 2508 event, status, false); 2509 } 2510 2511 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) 2512 { 2513 return req->num_pending_sgs == 0; 2514 } 2515 2516 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, 2517 const struct dwc3_event_depevt *event, 2518 struct dwc3_request *req, int status) 2519 { 2520 int ret; 2521 2522 if (req->num_pending_sgs) 2523 ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, 2524 status); 2525 else 2526 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2527 status); 2528 2529 if (req->needs_extra_trb) { 2530 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2531 status); 2532 req->needs_extra_trb = false; 2533 } 2534 2535 req->request.actual = req->request.length - req->remaining; 2536 2537 if (!dwc3_gadget_ep_request_completed(req)) { 2538 __dwc3_gadget_kick_transfer(dep); 2539 goto out; 2540 } 2541 2542 dwc3_gadget_giveback(dep, req, status); 2543 2544 out: 2545 return ret; 2546 } 2547 2548 static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep, 2549 const struct dwc3_event_depevt *event, int status) 2550 { 2551 struct dwc3_request *req; 2552 struct dwc3_request *tmp; 2553 2554 list_for_each_entry_safe(req, tmp, &dep->started_list, list) { 2555 int ret; 2556 2557 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event, 2558 req, status); 2559 if (ret) 2560 break; 2561 } 2562 } 2563 2564 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep, 2565 const struct dwc3_event_depevt *event) 2566 { 2567 dep->frame_number = event->parameters; 2568 } 2569 2570 static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, 2571 const struct dwc3_event_depevt *event) 2572 { 2573 struct dwc3 *dwc = dep->dwc; 2574 unsigned status = 0; 2575 bool stop = false; 2576 2577 dwc3_gadget_endpoint_frame_from_event(dep, event); 2578 2579 if (event->status & DEPEVT_STATUS_BUSERR) 2580 status = -ECONNRESET; 2581 2582 if (event->status & 
DEPEVT_STATUS_MISSED_ISOC) { 2583 status = -EXDEV; 2584 2585 if (list_empty(&dep->started_list)) 2586 stop = true; 2587 } 2588 2589 dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); 2590 2591 if (stop) 2592 dwc3_stop_active_transfer(dep, true, true); 2593 2594 /* 2595 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2596 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 2597 */ 2598 if (dwc->revision < DWC3_REVISION_183A) { 2599 u32 reg; 2600 int i; 2601 2602 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2603 dep = dwc->eps[i]; 2604 2605 if (!(dep->flags & DWC3_EP_ENABLED)) 2606 continue; 2607 2608 if (!list_empty(&dep->started_list)) 2609 return; 2610 } 2611 2612 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2613 reg |= dwc->u1u2; 2614 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2615 2616 dwc->u1u2 = 0; 2617 } 2618 } 2619 2620 static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, 2621 const struct dwc3_event_depevt *event) 2622 { 2623 dwc3_gadget_endpoint_frame_from_event(dep, event); 2624 (void) __dwc3_gadget_start_isoc(dep); 2625 } 2626 2627 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2628 const struct dwc3_event_depevt *event) 2629 { 2630 struct dwc3_ep *dep; 2631 u8 epnum = event->endpoint_number; 2632 u8 cmd; 2633 2634 dep = dwc->eps[epnum]; 2635 2636 if (!(dep->flags & DWC3_EP_ENABLED)) { 2637 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) 2638 return; 2639 2640 /* Handle only EPCMDCMPLT when EP disabled */ 2641 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) 2642 return; 2643 } 2644 2645 if (epnum == 0 || epnum == 1) { 2646 dwc3_ep0_interrupt(dwc, event); 2647 return; 2648 } 2649 2650 switch (event->endpoint_event) { 2651 case DWC3_DEPEVT_XFERINPROGRESS: 2652 dwc3_gadget_endpoint_transfer_in_progress(dep, event); 2653 break; 2654 case DWC3_DEPEVT_XFERNOTREADY: 2655 dwc3_gadget_endpoint_transfer_not_ready(dep, event); 2656 break; 2657 case DWC3_DEPEVT_EPCMDCMPLT: 2658 cmd = DEPEVT_PARAMETER_CMD(event->parameters); 2659 2660 if (cmd == DWC3_DEPCMD_ENDTRANSFER) { 2661 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 2662 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 2663 dwc3_gadget_ep_cleanup_cancelled_requests(dep); 2664 if ((dep->flags & DWC3_EP_DELAY_START) && 2665 !usb_endpoint_xfer_isoc(dep->endpoint.desc)) 2666 __dwc3_gadget_kick_transfer(dep); 2667 2668 dep->flags &= ~DWC3_EP_DELAY_START; 2669 } 2670 break; 2671 case DWC3_DEPEVT_STREAMEVT: 2672 case DWC3_DEPEVT_XFERCOMPLETE: 2673 case DWC3_DEPEVT_RXTXFIFOEVT: 2674 break; 2675 } 2676 } 2677 2678 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2679 { 2680 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2681 spin_unlock(&dwc->lock); 2682 dwc->gadget_driver->disconnect(&dwc->gadget); 2683 spin_lock(&dwc->lock); 2684 } 2685 } 2686 2687 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2688 { 2689 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2690 spin_unlock(&dwc->lock); 2691 dwc->gadget_driver->suspend(&dwc->gadget); 2692 spin_lock(&dwc->lock); 2693 } 2694 } 2695 2696 static void dwc3_resume_gadget(struct dwc3 *dwc) 2697 { 2698 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2699 spin_unlock(&dwc->lock); 2700 dwc->gadget_driver->resume(&dwc->gadget); 2701 spin_lock(&dwc->lock); 2702 } 2703 } 2704 2705 static void dwc3_reset_gadget(struct dwc3 *dwc) 2706 { 2707 if (!dwc->gadget_driver) 2708 return; 2709 2710 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2711 spin_unlock(&dwc->lock); 2712 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2713 
		spin_lock(&dwc->lock);
	}
}

static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
	bool interrupt)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and the gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is issuing EndTransfer with the
	 * CMDIOC bit set and delaying the transfer kick until the
	 * EndTransfer command has completed.
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing an EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function.
	 *
	 * This mode is NOT available on the DWC_usb31 IP.
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;

	if (!interrupt)
		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	else
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
2822 * 2823 * There's no suggested workaround on the official Bug 2824 * report, which states that "unless the driver/application 2825 * is doing any special handling of a disconnect event, 2826 * there is no functional issue". 2827 * 2828 * Unfortunately, it turns out that we _do_ some special 2829 * handling of a disconnect event, namely complete all 2830 * pending transfers, notify gadget driver of the 2831 * disconnection, and so on. 2832 * 2833 * Our suggested workaround is to follow the Disconnect 2834 * Event steps here, instead, based on a setup_packet_pending 2835 * flag. Such flag gets set whenever we have a SETUP_PENDING 2836 * status for EP0 TRBs and gets cleared on XferComplete for the 2837 * same endpoint. 2838 * 2839 * Refers to: 2840 * 2841 * STAR#9000466709: RTL: Device : Disconnect event not 2842 * generated if setup packet pending in FIFO 2843 */ 2844 if (dwc->revision < DWC3_REVISION_188A) { 2845 if (dwc->setup_packet_pending) 2846 dwc3_gadget_disconnect_interrupt(dwc); 2847 } 2848 2849 dwc3_reset_gadget(dwc); 2850 2851 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2852 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2853 dwc3_gadget_dctl_write_safe(dwc, reg); 2854 dwc->test_mode = false; 2855 dwc3_clear_stall_all_ep(dwc); 2856 2857 /* Reset device address to zero */ 2858 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2859 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2860 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2861 } 2862 2863 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2864 { 2865 struct dwc3_ep *dep; 2866 int ret; 2867 u32 reg; 2868 u8 speed; 2869 2870 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2871 speed = reg & DWC3_DSTS_CONNECTSPD; 2872 dwc->speed = speed; 2873 2874 /* 2875 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2876 * each time on Connect Done. 2877 * 2878 * Currently we always use the reset value. If any platform 2879 * wants to set this to a different value, we need to add a 2880 * setting and update GCTL.RAMCLKSEL here. 2881 */ 2882 2883 switch (speed) { 2884 case DWC3_DSTS_SUPERSPEED_PLUS: 2885 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2886 dwc->gadget.ep0->maxpacket = 512; 2887 dwc->gadget.speed = USB_SPEED_SUPER_PLUS; 2888 break; 2889 case DWC3_DSTS_SUPERSPEED: 2890 /* 2891 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2892 * would cause a missing USB3 Reset event. 2893 * 2894 * In such situations, we should force a USB3 Reset 2895 * event by calling our dwc3_gadget_reset_interrupt() 2896 * routine. 
2897 * 2898 * Refers to: 2899 * 2900 * STAR#9000483510: RTL: SS : USB3 reset event may 2901 * not be generated always when the link enters poll 2902 */ 2903 if (dwc->revision < DWC3_REVISION_190A) 2904 dwc3_gadget_reset_interrupt(dwc); 2905 2906 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2907 dwc->gadget.ep0->maxpacket = 512; 2908 dwc->gadget.speed = USB_SPEED_SUPER; 2909 break; 2910 case DWC3_DSTS_HIGHSPEED: 2911 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2912 dwc->gadget.ep0->maxpacket = 64; 2913 dwc->gadget.speed = USB_SPEED_HIGH; 2914 break; 2915 case DWC3_DSTS_FULLSPEED: 2916 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2917 dwc->gadget.ep0->maxpacket = 64; 2918 dwc->gadget.speed = USB_SPEED_FULL; 2919 break; 2920 case DWC3_DSTS_LOWSPEED: 2921 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2922 dwc->gadget.ep0->maxpacket = 8; 2923 dwc->gadget.speed = USB_SPEED_LOW; 2924 break; 2925 } 2926 2927 dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; 2928 2929 /* Enable USB2 LPM Capability */ 2930 2931 if ((dwc->revision > DWC3_REVISION_194A) && 2932 (speed != DWC3_DSTS_SUPERSPEED) && 2933 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) { 2934 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2935 reg |= DWC3_DCFG_LPM_CAP; 2936 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2937 2938 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2939 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2940 2941 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold | 2942 (dwc->is_utmi_l1_suspend << 4)); 2943 2944 /* 2945 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2946 * DCFG.LPMCap is set, core responses with an ACK and the 2947 * BESL value in the LPM token is less than or equal to LPM 2948 * NYET threshold. 2949 */ 2950 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2951 && dwc->has_lpm_erratum, 2952 "LPM Erratum not available on dwc3 revisions < 2.40a\n"); 2953 2954 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2955 reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold); 2956 2957 dwc3_gadget_dctl_write_safe(dwc, reg); 2958 } else { 2959 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2960 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2961 dwc3_gadget_dctl_write_safe(dwc, reg); 2962 } 2963 2964 dep = dwc->eps[0]; 2965 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY); 2966 if (ret) { 2967 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2968 return; 2969 } 2970 2971 dep = dwc->eps[1]; 2972 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY); 2973 if (ret) { 2974 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2975 return; 2976 } 2977 2978 /* 2979 * Configure PHY via GUSB3PIPECTLn if required. 2980 * 2981 * Update GTXFIFOSIZn 2982 * 2983 * In both cases reset values should be sufficient. 2984 */ 2985 } 2986 2987 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2988 { 2989 /* 2990 * TODO take core out of low power mode when that's 2991 * implemented. 2992 */ 2993 2994 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2995 spin_unlock(&dwc->lock); 2996 dwc->gadget_driver->resume(&dwc->gadget); 2997 spin_lock(&dwc->lock); 2998 } 2999 } 3000 3001 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 3002 unsigned int evtinfo) 3003 { 3004 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 3005 unsigned int pwropt; 3006 3007 /* 3008 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 3009 * Hibernation mode enabled which would show up when device detects 3010 * host-initiated U3 exit. 
	 *
	 * In that case, the device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such a spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
	    (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
		    (next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_gadget_dctl_write_safe(dwc, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
		dwc3_suspend_gadget(dwc);

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, the core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		/* It changed to be a suspend event for version 2.30a and above */
		if (dwc->revision >= DWC3_REVISION_230A) {
			/*
			 * Ignore suspend events until the gadget enters the
			 * USB_STATE_CONFIGURED state.
			 */
			if (dwc->gadget.state >= USB_STATE_CONFIGURED)
				dwc3_gadget_suspend_interrupt(dwc,
						event->event_info);
		}
		break;
	case DWC3_DEVICE_EVENT_SOF:
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
	case DWC3_DEVICE_EVENT_CMD_CMPL:
	case DWC3_DEVICE_EVENT_OVERFLOW:
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw, dwc);

	if (!event->type.is_devspec)
		dwc3_endpoint_interrupt(dwc, &event->depevt);
	else if (event->type.type == DWC3_EVENT_TYPE_DEV)
		dwc3_gadget_interrupt(dwc, &event->devt);
	else
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
}

static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->cache + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME: we assume we wrap around correctly to the next entry
		 * as almost all entries are 4 bytes in size. There is one
		 * 12-byte entry, which is a regular entry followed by 8 bytes
		 * of data. At the moment it isn't clear how things are
		 * organized if such an entry lands next to the buffer
		 * boundary, so worry about that once we try to handle it.
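		 *
		 * (Illustrative sketch of the arithmetic below, assuming the
		 * usual 4096-byte event buffer: lpos advances 0, 4, 8, ...,
		 * 4092 and is then wrapped back to 0 by the modulo on
		 * evt->length.)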
3222 */ 3223 evt->lpos = (evt->lpos + 4) % evt->length; 3224 left -= 4; 3225 } 3226 3227 evt->count = 0; 3228 evt->flags &= ~DWC3_EVENT_PENDING; 3229 ret = IRQ_HANDLED; 3230 3231 /* Unmask interrupt */ 3232 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3233 reg &= ~DWC3_GEVNTSIZ_INTMASK; 3234 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3235 3236 if (dwc->imod_interval) { 3237 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 3238 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 3239 } 3240 3241 return ret; 3242 } 3243 3244 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) 3245 { 3246 struct dwc3_event_buffer *evt = _evt; 3247 struct dwc3 *dwc = evt->dwc; 3248 unsigned long flags; 3249 irqreturn_t ret = IRQ_NONE; 3250 3251 spin_lock_irqsave(&dwc->lock, flags); 3252 ret = dwc3_process_event_buf(evt); 3253 spin_unlock_irqrestore(&dwc->lock, flags); 3254 3255 return ret; 3256 } 3257 3258 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) 3259 { 3260 struct dwc3 *dwc = evt->dwc; 3261 u32 amount; 3262 u32 count; 3263 u32 reg; 3264 3265 if (pm_runtime_suspended(dwc->dev)) { 3266 pm_runtime_get(dwc->dev); 3267 disable_irq_nosync(dwc->irq_gadget); 3268 dwc->pending_events = true; 3269 return IRQ_HANDLED; 3270 } 3271 3272 /* 3273 * With PCIe legacy interrupt, test shows that top-half irq handler can 3274 * be called again after HW interrupt deassertion. Check if bottom-half 3275 * irq event handler completes before caching new event to prevent 3276 * losing events. 3277 */ 3278 if (evt->flags & DWC3_EVENT_PENDING) 3279 return IRQ_HANDLED; 3280 3281 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3282 count &= DWC3_GEVNTCOUNT_MASK; 3283 if (!count) 3284 return IRQ_NONE; 3285 3286 evt->count = count; 3287 evt->flags |= DWC3_EVENT_PENDING; 3288 3289 /* Mask interrupt */ 3290 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3291 reg |= DWC3_GEVNTSIZ_INTMASK; 3292 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3293 3294 amount = min(count, evt->length - evt->lpos); 3295 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); 3296 3297 if (amount < count) 3298 memcpy(evt->cache, evt->buf, count - amount); 3299 3300 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); 3301 3302 return IRQ_WAKE_THREAD; 3303 } 3304 3305 static irqreturn_t dwc3_interrupt(int irq, void *_evt) 3306 { 3307 struct dwc3_event_buffer *evt = _evt; 3308 3309 return dwc3_check_event_buf(evt); 3310 } 3311 3312 static int dwc3_gadget_get_irq(struct dwc3 *dwc) 3313 { 3314 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 3315 int irq; 3316 3317 irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral"); 3318 if (irq > 0) 3319 goto out; 3320 3321 if (irq == -EPROBE_DEFER) 3322 goto out; 3323 3324 irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); 3325 if (irq > 0) 3326 goto out; 3327 3328 if (irq == -EPROBE_DEFER) 3329 goto out; 3330 3331 irq = platform_get_irq(dwc3_pdev, 0); 3332 if (irq > 0) 3333 goto out; 3334 3335 if (!irq) 3336 irq = -EINVAL; 3337 3338 out: 3339 return irq; 3340 } 3341 3342 /** 3343 * dwc3_gadget_init - initializes gadget related registers 3344 * @dwc: pointer to our controller context structure 3345 * 3346 * Returns 0 on success otherwise negative errno. 
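 *
 * Note that everything allocated here is released again either on the
 * error paths below or in dwc3_gadget_exit(), in the reverse order of
 * allocation.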
3347 */ 3348 int dwc3_gadget_init(struct dwc3 *dwc) 3349 { 3350 int ret; 3351 int irq; 3352 3353 irq = dwc3_gadget_get_irq(dwc); 3354 if (irq < 0) { 3355 ret = irq; 3356 goto err0; 3357 } 3358 3359 dwc->irq_gadget = irq; 3360 3361 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 3362 sizeof(*dwc->ep0_trb) * 2, 3363 &dwc->ep0_trb_addr, GFP_KERNEL); 3364 if (!dwc->ep0_trb) { 3365 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 3366 ret = -ENOMEM; 3367 goto err0; 3368 } 3369 3370 dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL); 3371 if (!dwc->setup_buf) { 3372 ret = -ENOMEM; 3373 goto err1; 3374 } 3375 3376 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3377 &dwc->bounce_addr, GFP_KERNEL); 3378 if (!dwc->bounce) { 3379 ret = -ENOMEM; 3380 goto err2; 3381 } 3382 3383 init_completion(&dwc->ep0_in_setup); 3384 3385 dwc->gadget.ops = &dwc3_gadget_ops; 3386 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3387 dwc->gadget.sg_supported = true; 3388 dwc->gadget.name = "dwc3-gadget"; 3389 dwc->gadget.lpm_capable = true; 3390 3391 /* 3392 * FIXME We might be setting max_speed to <SUPER, however versions 3393 * <2.20a of dwc3 have an issue with metastability (documented 3394 * elsewhere in this driver) which tells us we can't set max speed to 3395 * anything lower than SUPER. 3396 * 3397 * Because gadget.max_speed is only used by composite.c and function 3398 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3399 * to happen so we avoid sending SuperSpeed Capability descriptor 3400 * together with our BOS descriptor as that could confuse host into 3401 * thinking we can handle super speed. 3402 * 3403 * Note that, in fact, we won't even support GetBOS requests when speed 3404 * is less than super speed because we don't have means, yet, to tell 3405 * composite.c that we are USB 2.0 + LPM ECN. 3406 */ 3407 if (dwc->revision < DWC3_REVISION_220A && 3408 !dwc->dis_metastability_quirk) 3409 dev_info(dwc->dev, "changing max_speed on rev %08x\n", 3410 dwc->revision); 3411 3412 dwc->gadget.max_speed = dwc->maximum_speed; 3413 3414 /* 3415 * REVISIT: Here we should clear all pending IRQs to be 3416 * sure we're starting from a well known location. 
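	 *
	 * (One way to do that, mirroring what the IRQ path does, would be
	 * to read DWC3_GEVNTCOUNT(0) and write the count back to acknowledge
	 * anything already queued; this is only a sketch, not something the
	 * code below currently does.)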
3417 */ 3418 3419 ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps); 3420 if (ret) 3421 goto err3; 3422 3423 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3424 if (ret) { 3425 dev_err(dwc->dev, "failed to register udc\n"); 3426 goto err4; 3427 } 3428 3429 dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed); 3430 3431 return 0; 3432 3433 err4: 3434 dwc3_gadget_free_endpoints(dwc); 3435 3436 err3: 3437 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3438 dwc->bounce_addr); 3439 3440 err2: 3441 kfree(dwc->setup_buf); 3442 3443 err1: 3444 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3445 dwc->ep0_trb, dwc->ep0_trb_addr); 3446 3447 err0: 3448 return ret; 3449 } 3450 3451 /* -------------------------------------------------------------------------- */ 3452 3453 void dwc3_gadget_exit(struct dwc3 *dwc) 3454 { 3455 usb_del_gadget_udc(&dwc->gadget); 3456 dwc3_gadget_free_endpoints(dwc); 3457 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3458 dwc->bounce_addr); 3459 kfree(dwc->setup_buf); 3460 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3461 dwc->ep0_trb, dwc->ep0_trb_addr); 3462 } 3463 3464 int dwc3_gadget_suspend(struct dwc3 *dwc) 3465 { 3466 if (!dwc->gadget_driver) 3467 return 0; 3468 3469 dwc3_gadget_run_stop(dwc, false, false); 3470 dwc3_disconnect_gadget(dwc); 3471 __dwc3_gadget_stop(dwc); 3472 3473 return 0; 3474 } 3475 3476 int dwc3_gadget_resume(struct dwc3 *dwc) 3477 { 3478 int ret; 3479 3480 if (!dwc->gadget_driver) 3481 return 0; 3482 3483 ret = __dwc3_gadget_start(dwc); 3484 if (ret < 0) 3485 goto err0; 3486 3487 ret = dwc3_gadget_run_stop(dwc, true, false); 3488 if (ret < 0) 3489 goto err1; 3490 3491 return 0; 3492 3493 err1: 3494 __dwc3_gadget_stop(dwc); 3495 3496 err0: 3497 return ret; 3498 } 3499 3500 void dwc3_gadget_process_pending_events(struct dwc3 *dwc) 3501 { 3502 if (dwc->pending_events) { 3503 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); 3504 dwc->pending_events = false; 3505 enable_irq(dwc->irq_gadget); 3506 } 3507 } 3508