1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 4 * 5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 6 * 7 * Authors: Felipe Balbi <balbi@ti.com>, 8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 9 */ 10 11 #include <linux/kernel.h> 12 #include <linux/delay.h> 13 #include <linux/slab.h> 14 #include <linux/spinlock.h> 15 #include <linux/platform_device.h> 16 #include <linux/pm_runtime.h> 17 #include <linux/interrupt.h> 18 #include <linux/io.h> 19 #include <linux/list.h> 20 #include <linux/dma-mapping.h> 21 22 #include <linux/usb/ch9.h> 23 #include <linux/usb/gadget.h> 24 25 #include "debug.h" 26 #include "core.h" 27 #include "gadget.h" 28 #include "io.h" 29 30 #define DWC3_ALIGN_FRAME(d, n) (((d)->frame_number + ((d)->interval * (n))) \ 31 & ~((d)->interval - 1)) 32 33 /** 34 * dwc3_gadget_set_test_mode - enables usb2 test modes 35 * @dwc: pointer to our context structure 36 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 37 * 38 * Caller should take care of locking. This function will return 0 on 39 * success or -EINVAL if wrong Test Selector is passed. 40 */ 41 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 42 { 43 u32 reg; 44 45 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 46 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 47 48 switch (mode) { 49 case TEST_J: 50 case TEST_K: 51 case TEST_SE0_NAK: 52 case TEST_PACKET: 53 case TEST_FORCE_EN: 54 reg |= mode << 1; 55 break; 56 default: 57 return -EINVAL; 58 } 59 60 dwc3_gadget_dctl_write_safe(dwc, reg); 61 62 return 0; 63 } 64 65 /** 66 * dwc3_gadget_get_link_state - gets current state of usb link 67 * @dwc: pointer to our context structure 68 * 69 * Caller should take care of locking. This function will 70 * return the link state on success (>= 0) or -ETIMEDOUT. 71 */ 72 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 73 { 74 u32 reg; 75 76 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 77 78 return DWC3_DSTS_USBLNKST(reg); 79 } 80 81 /** 82 * dwc3_gadget_set_link_state - sets usb link to a particular state 83 * @dwc: pointer to our context structure 84 * @state: the state to put link into 85 * 86 * Caller should take care of locking. This function will 87 * return 0 on success or -ETIMEDOUT. 88 */ 89 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 90 { 91 int retries = 10000; 92 u32 reg; 93 94 /* 95 * Wait until device controller is ready. Only applies to 1.94a and 96 * later RTL. 
97 */ 98 if (dwc->revision >= DWC3_REVISION_194A) { 99 while (--retries) { 100 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 101 if (reg & DWC3_DSTS_DCNRD) 102 udelay(5); 103 else 104 break; 105 } 106 107 if (retries <= 0) 108 return -ETIMEDOUT; 109 } 110 111 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 112 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 113 114 /* set no action before sending new link state change */ 115 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 116 117 /* set requested state */ 118 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 119 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 120 121 /* 122 * The following code is racy when called from dwc3_gadget_wakeup, 123 * and is not needed, at least on newer versions 124 */ 125 if (dwc->revision >= DWC3_REVISION_194A) 126 return 0; 127 128 /* wait for a change in DSTS */ 129 retries = 10000; 130 while (--retries) { 131 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 132 133 if (DWC3_DSTS_USBLNKST(reg) == state) 134 return 0; 135 136 udelay(5); 137 } 138 139 return -ETIMEDOUT; 140 } 141 142 /** 143 * dwc3_ep_inc_trb - increment a trb index. 144 * @index: Pointer to the TRB index to increment. 145 * 146 * The index should never point to the link TRB. After incrementing, 147 * if it is point to the link TRB, wrap around to the beginning. The 148 * link TRB is always at the last TRB entry. 149 */ 150 static void dwc3_ep_inc_trb(u8 *index) 151 { 152 (*index)++; 153 if (*index == (DWC3_TRB_NUM - 1)) 154 *index = 0; 155 } 156 157 /** 158 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer 159 * @dep: The endpoint whose enqueue pointer we're incrementing 160 */ 161 static void dwc3_ep_inc_enq(struct dwc3_ep *dep) 162 { 163 dwc3_ep_inc_trb(&dep->trb_enqueue); 164 } 165 166 /** 167 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer 168 * @dep: The endpoint whose enqueue pointer we're incrementing 169 */ 170 static void dwc3_ep_inc_deq(struct dwc3_ep *dep) 171 { 172 dwc3_ep_inc_trb(&dep->trb_dequeue); 173 } 174 175 static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, 176 struct dwc3_request *req, int status) 177 { 178 struct dwc3 *dwc = dep->dwc; 179 180 list_del(&req->list); 181 req->remaining = 0; 182 req->needs_extra_trb = false; 183 184 if (req->request.status == -EINPROGRESS) 185 req->request.status = status; 186 187 if (req->trb) 188 usb_gadget_unmap_request_by_dev(dwc->sysdev, 189 &req->request, req->direction); 190 191 req->trb = NULL; 192 trace_dwc3_gadget_giveback(req); 193 194 if (dep->number > 1) 195 pm_runtime_put(dwc->dev); 196 } 197 198 /** 199 * dwc3_gadget_giveback - call struct usb_request's ->complete callback 200 * @dep: The endpoint to whom the request belongs to 201 * @req: The request we're giving back 202 * @status: completion code for the request 203 * 204 * Must be called with controller's lock held and interrupts disabled. This 205 * function will unmap @req and call its ->complete() callback to notify upper 206 * layers that it has completed. 
207 */ 208 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 209 int status) 210 { 211 struct dwc3 *dwc = dep->dwc; 212 213 dwc3_gadget_del_and_unmap_request(dep, req, status); 214 req->status = DWC3_REQUEST_STATUS_COMPLETED; 215 216 spin_unlock(&dwc->lock); 217 usb_gadget_giveback_request(&dep->endpoint, &req->request); 218 spin_lock(&dwc->lock); 219 } 220 221 /** 222 * dwc3_send_gadget_generic_command - issue a generic command for the controller 223 * @dwc: pointer to the controller context 224 * @cmd: the command to be issued 225 * @param: command parameter 226 * 227 * Caller should take care of locking. Issue @cmd with a given @param to @dwc 228 * and wait for its completion. 229 */ 230 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 231 { 232 u32 timeout = 500; 233 int status = 0; 234 int ret = 0; 235 u32 reg; 236 237 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 238 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 239 240 do { 241 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 242 if (!(reg & DWC3_DGCMD_CMDACT)) { 243 status = DWC3_DGCMD_STATUS(reg); 244 if (status) 245 ret = -EINVAL; 246 break; 247 } 248 } while (--timeout); 249 250 if (!timeout) { 251 ret = -ETIMEDOUT; 252 status = -ETIMEDOUT; 253 } 254 255 trace_dwc3_gadget_generic_cmd(cmd, param, status); 256 257 return ret; 258 } 259 260 static int __dwc3_gadget_wakeup(struct dwc3 *dwc); 261 262 /** 263 * dwc3_send_gadget_ep_cmd - issue an endpoint command 264 * @dep: the endpoint to which the command is going to be issued 265 * @cmd: the command to be issued 266 * @params: parameters to the command 267 * 268 * Caller should handle locking. This function will issue @cmd with given 269 * @params to @dep and wait for its completion. 270 */ 271 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, 272 struct dwc3_gadget_ep_cmd_params *params) 273 { 274 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 275 struct dwc3 *dwc = dep->dwc; 276 u32 timeout = 1000; 277 u32 saved_config = 0; 278 u32 reg; 279 280 int cmd_status = 0; 281 int ret = -EINVAL; 282 283 /* 284 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or 285 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an 286 * endpoint command. 287 * 288 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY 289 * settings. Restore them after the command is completed. 
290 * 291 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 292 */ 293 if (dwc->gadget.speed <= USB_SPEED_HIGH) { 294 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 295 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { 296 saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; 297 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; 298 } 299 300 if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) { 301 saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM; 302 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; 303 } 304 305 if (saved_config) 306 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 307 } 308 309 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 310 int needs_wakeup; 311 312 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || 313 dwc->link_state == DWC3_LINK_STATE_U2 || 314 dwc->link_state == DWC3_LINK_STATE_U3); 315 316 if (unlikely(needs_wakeup)) { 317 ret = __dwc3_gadget_wakeup(dwc); 318 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", 319 ret); 320 } 321 } 322 323 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); 324 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); 325 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); 326 327 /* 328 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're 329 * not relying on XferNotReady, we can make use of a special "No 330 * Response Update Transfer" command where we should clear both CmdAct 331 * and CmdIOC bits. 332 * 333 * With this, we don't need to wait for command completion and can 334 * straight away issue further commands to the endpoint. 335 * 336 * NOTICE: We're making an assumption that control endpoints will never 337 * make use of Update Transfer command. This is a safe assumption 338 * because we can never have more than one request at a time with 339 * Control Endpoints. If anybody changes that assumption, this chunk 340 * needs to be updated accordingly. 341 */ 342 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && 343 !usb_endpoint_xfer_isoc(desc)) 344 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); 345 else 346 cmd |= DWC3_DEPCMD_CMDACT; 347 348 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); 349 do { 350 reg = dwc3_readl(dep->regs, DWC3_DEPCMD); 351 if (!(reg & DWC3_DEPCMD_CMDACT)) { 352 cmd_status = DWC3_DEPCMD_STATUS(reg); 353 354 switch (cmd_status) { 355 case 0: 356 ret = 0; 357 break; 358 case DEPEVT_TRANSFER_NO_RESOURCE: 359 ret = -EINVAL; 360 break; 361 case DEPEVT_TRANSFER_BUS_EXPIRY: 362 /* 363 * SW issues START TRANSFER command to 364 * isochronous ep with future frame interval. If 365 * future interval time has already passed when 366 * core receives the command, it will respond 367 * with an error status of 'Bus Expiry'. 368 * 369 * Instead of always returning -EINVAL, let's 370 * give a hint to the gadget driver that this is 371 * the case by returning -EAGAIN. 
372 */ 373 ret = -EAGAIN; 374 break; 375 default: 376 dev_WARN(dwc->dev, "UNKNOWN cmd status\n"); 377 } 378 379 break; 380 } 381 } while (--timeout); 382 383 if (timeout == 0) { 384 ret = -ETIMEDOUT; 385 cmd_status = -ETIMEDOUT; 386 } 387 388 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); 389 390 if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { 391 dep->flags |= DWC3_EP_TRANSFER_STARTED; 392 dwc3_gadget_ep_get_transfer_index(dep); 393 } 394 395 if (saved_config) { 396 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 397 reg |= saved_config; 398 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 399 } 400 401 return ret; 402 } 403 404 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) 405 { 406 struct dwc3 *dwc = dep->dwc; 407 struct dwc3_gadget_ep_cmd_params params; 408 u32 cmd = DWC3_DEPCMD_CLEARSTALL; 409 410 /* 411 * As of core revision 2.60a the recommended programming model 412 * is to set the ClearPendIN bit when issuing a Clear Stall EP 413 * command for IN endpoints. This is to prevent an issue where 414 * some (non-compliant) hosts may not send ACK TPs for pending 415 * IN transfers due to a mishandled error condition. Synopsys 416 * STAR 9000614252. 417 */ 418 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) && 419 (dwc->gadget.speed >= USB_SPEED_SUPER)) 420 cmd |= DWC3_DEPCMD_CLEARPENDIN; 421 422 memset(¶ms, 0, sizeof(params)); 423 424 return dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 425 } 426 427 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 428 struct dwc3_trb *trb) 429 { 430 u32 offset = (char *) trb - (char *) dep->trb_pool; 431 432 return dep->trb_pool_dma + offset; 433 } 434 435 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 436 { 437 struct dwc3 *dwc = dep->dwc; 438 439 if (dep->trb_pool) 440 return 0; 441 442 dep->trb_pool = dma_alloc_coherent(dwc->sysdev, 443 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 444 &dep->trb_pool_dma, GFP_KERNEL); 445 if (!dep->trb_pool) { 446 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 447 dep->name); 448 return -ENOMEM; 449 } 450 451 return 0; 452 } 453 454 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 455 { 456 struct dwc3 *dwc = dep->dwc; 457 458 dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 459 dep->trb_pool, dep->trb_pool_dma); 460 461 dep->trb_pool = NULL; 462 dep->trb_pool_dma = 0; 463 } 464 465 static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) 466 { 467 struct dwc3_gadget_ep_cmd_params params; 468 469 memset(¶ms, 0x00, sizeof(params)); 470 471 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 472 473 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 474 ¶ms); 475 } 476 477 /** 478 * dwc3_gadget_start_config - configure ep resources 479 * @dep: endpoint that is being enabled 480 * 481 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's 482 * completion, it will set Transfer Resource for all available endpoints. 483 * 484 * The assignment of transfer resources cannot perfectly follow the data book 485 * due to the fact that the controller driver does not have all knowledge of the 486 * configuration in advance. It is given this information piecemeal by the 487 * composite gadget framework after every SET_CONFIGURATION and 488 * SET_INTERFACE. Trying to follow the databook programming model in this 489 * scenario can cause errors. 
For two reasons: 490 * 491 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every 492 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is 493 * incorrect in the scenario of multiple interfaces. 494 * 495 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new 496 * endpoint on alt setting (8.1.6). 497 * 498 * The following simplified method is used instead: 499 * 500 * All hardware endpoints can be assigned a transfer resource and this setting 501 * will stay persistent until either a core reset or hibernation. So whenever we 502 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do 503 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are 504 * guaranteed that there are as many transfer resources as endpoints. 505 * 506 * This function is called for each endpoint when it is being enabled but is 507 * triggered only when called for EP0-out, which always happens first, and which 508 * should only happen in one of the above conditions. 509 */ 510 static int dwc3_gadget_start_config(struct dwc3_ep *dep) 511 { 512 struct dwc3_gadget_ep_cmd_params params; 513 struct dwc3 *dwc; 514 u32 cmd; 515 int i; 516 int ret; 517 518 if (dep->number) 519 return 0; 520 521 memset(¶ms, 0x00, sizeof(params)); 522 cmd = DWC3_DEPCMD_DEPSTARTCFG; 523 dwc = dep->dwc; 524 525 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 526 if (ret) 527 return ret; 528 529 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 530 struct dwc3_ep *dep = dwc->eps[i]; 531 532 if (!dep) 533 continue; 534 535 ret = dwc3_gadget_set_xfer_resource(dep); 536 if (ret) 537 return ret; 538 } 539 540 return 0; 541 } 542 543 static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) 544 { 545 const struct usb_ss_ep_comp_descriptor *comp_desc; 546 const struct usb_endpoint_descriptor *desc; 547 struct dwc3_gadget_ep_cmd_params params; 548 struct dwc3 *dwc = dep->dwc; 549 550 comp_desc = dep->endpoint.comp_desc; 551 desc = dep->endpoint.desc; 552 553 memset(¶ms, 0x00, sizeof(params)); 554 555 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 556 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 557 558 /* Burst size is only needed in SuperSpeed mode */ 559 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 560 u32 burst = dep->endpoint.maxburst; 561 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); 562 } 563 564 params.param0 |= action; 565 if (action == DWC3_DEPCFG_ACTION_RESTORE) 566 params.param2 |= dep->saved_state; 567 568 if (usb_endpoint_xfer_control(desc)) 569 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; 570 571 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc)) 572 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN; 573 574 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 575 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 576 | DWC3_DEPCFG_STREAM_EVENT_EN; 577 dep->stream_capable = true; 578 } 579 580 if (!usb_endpoint_xfer_control(desc)) 581 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 582 583 /* 584 * We are doing 1:1 mapping for endpoints, meaning 585 * Physical Endpoints 2 maps to Logical Endpoint 2 and 586 * so on. We consider the direction bit as part of the physical 587 * endpoint number. So USB endpoint 0x81 is 0x03. 
588 */ 589 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 590 591 /* 592 * We must use the lower 16 TX FIFOs even though 593 * HW might have more 594 */ 595 if (dep->direction) 596 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 597 598 if (desc->bInterval) { 599 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 600 dep->interval = 1 << (desc->bInterval - 1); 601 } 602 603 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); 604 } 605 606 /** 607 * __dwc3_gadget_ep_enable - initializes a hw endpoint 608 * @dep: endpoint to be initialized 609 * @action: one of INIT, MODIFY or RESTORE 610 * 611 * Caller should take care of locking. Execute all necessary commands to 612 * initialize a HW endpoint so it can be used by a gadget driver. 613 */ 614 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) 615 { 616 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 617 struct dwc3 *dwc = dep->dwc; 618 619 u32 reg; 620 int ret; 621 622 if (!(dep->flags & DWC3_EP_ENABLED)) { 623 ret = dwc3_gadget_start_config(dep); 624 if (ret) 625 return ret; 626 } 627 628 ret = dwc3_gadget_set_ep_config(dep, action); 629 if (ret) 630 return ret; 631 632 if (!(dep->flags & DWC3_EP_ENABLED)) { 633 struct dwc3_trb *trb_st_hw; 634 struct dwc3_trb *trb_link; 635 636 dep->type = usb_endpoint_type(desc); 637 dep->flags |= DWC3_EP_ENABLED; 638 639 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 640 reg |= DWC3_DALEPENA_EP(dep->number); 641 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 642 643 if (usb_endpoint_xfer_control(desc)) 644 goto out; 645 646 /* Initialize the TRB ring */ 647 dep->trb_dequeue = 0; 648 dep->trb_enqueue = 0; 649 memset(dep->trb_pool, 0, 650 sizeof(struct dwc3_trb) * DWC3_TRB_NUM); 651 652 /* Link TRB. The HWO bit is never reset */ 653 trb_st_hw = &dep->trb_pool[0]; 654 655 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 656 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 657 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 658 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 659 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 660 } 661 662 /* 663 * Issue StartTransfer here with no-op TRB so we can always rely on No 664 * Response Update Transfer command. 
665 */ 666 if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) || 667 usb_endpoint_xfer_int(desc)) { 668 struct dwc3_gadget_ep_cmd_params params; 669 struct dwc3_trb *trb; 670 dma_addr_t trb_dma; 671 u32 cmd; 672 673 memset(¶ms, 0, sizeof(params)); 674 trb = &dep->trb_pool[0]; 675 trb_dma = dwc3_trb_dma_offset(dep, trb); 676 677 params.param0 = upper_32_bits(trb_dma); 678 params.param1 = lower_32_bits(trb_dma); 679 680 cmd = DWC3_DEPCMD_STARTTRANSFER; 681 682 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 683 if (ret < 0) 684 return ret; 685 } 686 687 out: 688 trace_dwc3_gadget_ep_enable(dep); 689 690 return 0; 691 } 692 693 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, 694 bool interrupt); 695 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 696 { 697 struct dwc3_request *req; 698 699 dwc3_stop_active_transfer(dep, true, false); 700 701 /* - giveback all requests to gadget driver */ 702 while (!list_empty(&dep->started_list)) { 703 req = next_request(&dep->started_list); 704 705 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 706 } 707 708 while (!list_empty(&dep->pending_list)) { 709 req = next_request(&dep->pending_list); 710 711 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 712 } 713 714 while (!list_empty(&dep->cancelled_list)) { 715 req = next_request(&dep->cancelled_list); 716 717 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 718 } 719 } 720 721 /** 722 * __dwc3_gadget_ep_disable - disables a hw endpoint 723 * @dep: the endpoint to disable 724 * 725 * This function undoes what __dwc3_gadget_ep_enable did and also removes 726 * requests which are currently being processed by the hardware and those which 727 * are not yet scheduled. 728 * 729 * Caller should take care of locking. 730 */ 731 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 732 { 733 struct dwc3 *dwc = dep->dwc; 734 u32 reg; 735 736 trace_dwc3_gadget_ep_disable(dep); 737 738 dwc3_remove_requests(dwc, dep); 739 740 /* make sure HW endpoint isn't stalled */ 741 if (dep->flags & DWC3_EP_STALL) 742 __dwc3_gadget_ep_set_halt(dep, 0, false); 743 744 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 745 reg &= ~DWC3_DALEPENA_EP(dep->number); 746 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 747 748 dep->stream_capable = false; 749 dep->type = 0; 750 dep->flags = 0; 751 752 /* Clear out the ep descriptors for non-ep0 */ 753 if (dep->number > 1) { 754 dep->endpoint.comp_desc = NULL; 755 dep->endpoint.desc = NULL; 756 } 757 758 return 0; 759 } 760 761 /* -------------------------------------------------------------------------- */ 762 763 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 764 const struct usb_endpoint_descriptor *desc) 765 { 766 return -EINVAL; 767 } 768 769 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 770 { 771 return -EINVAL; 772 } 773 774 /* -------------------------------------------------------------------------- */ 775 776 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 777 const struct usb_endpoint_descriptor *desc) 778 { 779 struct dwc3_ep *dep; 780 struct dwc3 *dwc; 781 unsigned long flags; 782 int ret; 783 784 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 785 pr_debug("dwc3: invalid parameters\n"); 786 return -EINVAL; 787 } 788 789 if (!desc->wMaxPacketSize) { 790 pr_debug("dwc3: missing wMaxPacketSize\n"); 791 return -EINVAL; 792 } 793 794 dep = to_dwc3_ep(ep); 795 dwc = dep->dwc; 796 797 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 798 "%s is already enabled\n", 799 dep->name)) 800 return 0; 801 802 
spin_lock_irqsave(&dwc->lock, flags); 803 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 804 spin_unlock_irqrestore(&dwc->lock, flags); 805 806 return ret; 807 } 808 809 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 810 { 811 struct dwc3_ep *dep; 812 struct dwc3 *dwc; 813 unsigned long flags; 814 int ret; 815 816 if (!ep) { 817 pr_debug("dwc3: invalid parameters\n"); 818 return -EINVAL; 819 } 820 821 dep = to_dwc3_ep(ep); 822 dwc = dep->dwc; 823 824 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 825 "%s is already disabled\n", 826 dep->name)) 827 return 0; 828 829 spin_lock_irqsave(&dwc->lock, flags); 830 ret = __dwc3_gadget_ep_disable(dep); 831 spin_unlock_irqrestore(&dwc->lock, flags); 832 833 return ret; 834 } 835 836 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 837 gfp_t gfp_flags) 838 { 839 struct dwc3_request *req; 840 struct dwc3_ep *dep = to_dwc3_ep(ep); 841 842 req = kzalloc(sizeof(*req), gfp_flags); 843 if (!req) 844 return NULL; 845 846 req->direction = dep->direction; 847 req->epnum = dep->number; 848 req->dep = dep; 849 req->status = DWC3_REQUEST_STATUS_UNKNOWN; 850 851 trace_dwc3_alloc_request(req); 852 853 return &req->request; 854 } 855 856 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 857 struct usb_request *request) 858 { 859 struct dwc3_request *req = to_dwc3_request(request); 860 861 trace_dwc3_free_request(req); 862 kfree(req); 863 } 864 865 /** 866 * dwc3_ep_prev_trb - returns the previous TRB in the ring 867 * @dep: The endpoint with the TRB ring 868 * @index: The index of the current TRB in the ring 869 * 870 * Returns the TRB prior to the one pointed to by the index. If the 871 * index is 0, we will wrap backwards, skip the link TRB, and return 872 * the one just before that. 873 */ 874 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) 875 { 876 u8 tmp = index; 877 878 if (!tmp) 879 tmp = DWC3_TRB_NUM - 1; 880 881 return &dep->trb_pool[tmp - 1]; 882 } 883 884 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) 885 { 886 struct dwc3_trb *tmp; 887 u8 trbs_left; 888 889 /* 890 * If enqueue & dequeue are equal than it is either full or empty. 891 * 892 * One way to know for sure is if the TRB right before us has HWO bit 893 * set or not. If it has, then we're definitely full and can't fit any 894 * more transfers in our ring. 
895 */ 896 if (dep->trb_enqueue == dep->trb_dequeue) { 897 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 898 if (tmp->ctrl & DWC3_TRB_CTRL_HWO) 899 return 0; 900 901 return DWC3_TRB_NUM - 1; 902 } 903 904 trbs_left = dep->trb_dequeue - dep->trb_enqueue; 905 trbs_left &= (DWC3_TRB_NUM - 1); 906 907 if (dep->trb_dequeue < dep->trb_enqueue) 908 trbs_left--; 909 910 return trbs_left; 911 } 912 913 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, 914 dma_addr_t dma, unsigned length, unsigned chain, unsigned node, 915 unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt) 916 { 917 struct dwc3 *dwc = dep->dwc; 918 struct usb_gadget *gadget = &dwc->gadget; 919 enum usb_device_speed speed = gadget->speed; 920 921 trb->size = DWC3_TRB_SIZE_LENGTH(length); 922 trb->bpl = lower_32_bits(dma); 923 trb->bph = upper_32_bits(dma); 924 925 switch (usb_endpoint_type(dep->endpoint.desc)) { 926 case USB_ENDPOINT_XFER_CONTROL: 927 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 928 break; 929 930 case USB_ENDPOINT_XFER_ISOC: 931 if (!node) { 932 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 933 934 /* 935 * USB Specification 2.0 Section 5.9.2 states that: "If 936 * there is only a single transaction in the microframe, 937 * only a DATA0 data packet PID is used. If there are 938 * two transactions per microframe, DATA1 is used for 939 * the first transaction data packet and DATA0 is used 940 * for the second transaction data packet. If there are 941 * three transactions per microframe, DATA2 is used for 942 * the first transaction data packet, DATA1 is used for 943 * the second, and DATA0 is used for the third." 944 * 945 * IOW, we should satisfy the following cases: 946 * 947 * 1) length <= maxpacket 948 * - DATA0 949 * 950 * 2) maxpacket < length <= (2 * maxpacket) 951 * - DATA1, DATA0 952 * 953 * 3) (2 * maxpacket) < length <= (3 * maxpacket) 954 * - DATA2, DATA1, DATA0 955 */ 956 if (speed == USB_SPEED_HIGH) { 957 struct usb_ep *ep = &dep->endpoint; 958 unsigned int mult = 2; 959 unsigned int maxp = usb_endpoint_maxp(ep->desc); 960 961 if (length <= (2 * maxp)) 962 mult--; 963 964 if (length <= maxp) 965 mult--; 966 967 trb->size |= DWC3_TRB_SIZE_PCM1(mult); 968 } 969 } else { 970 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 971 } 972 973 /* always enable Interrupt on Missed ISOC */ 974 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 975 break; 976 977 case USB_ENDPOINT_XFER_BULK: 978 case USB_ENDPOINT_XFER_INT: 979 trb->ctrl = DWC3_TRBCTL_NORMAL; 980 break; 981 default: 982 /* 983 * This is only possible with faulty memory because we 984 * checked it already :) 985 */ 986 dev_WARN(dwc->dev, "Unknown endpoint type %d\n", 987 usb_endpoint_type(dep->endpoint.desc)); 988 } 989 990 /* 991 * Enable Continue on Short Packet 992 * when endpoint is not a stream capable 993 */ 994 if (usb_endpoint_dir_out(dep->endpoint.desc)) { 995 if (!dep->stream_capable) 996 trb->ctrl |= DWC3_TRB_CTRL_CSP; 997 998 if (short_not_ok) 999 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 1000 } 1001 1002 if ((!no_interrupt && !chain) || 1003 (dwc3_calc_trbs_left(dep) == 1)) 1004 trb->ctrl |= DWC3_TRB_CTRL_IOC; 1005 1006 if (chain) 1007 trb->ctrl |= DWC3_TRB_CTRL_CHN; 1008 1009 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 1010 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); 1011 1012 trb->ctrl |= DWC3_TRB_CTRL_HWO; 1013 1014 dwc3_ep_inc_enq(dep); 1015 1016 trace_dwc3_prepare_trb(dep, trb); 1017 } 1018 1019 /** 1020 * dwc3_prepare_one_trb - setup one TRB from one request 1021 * @dep: endpoint for which this 
request is prepared 1022 * @req: dwc3_request pointer 1023 * @chain: should this TRB be chained to the next? 1024 * @node: only for isochronous endpoints. First TRB needs different type. 1025 */ 1026 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 1027 struct dwc3_request *req, unsigned chain, unsigned node) 1028 { 1029 struct dwc3_trb *trb; 1030 unsigned int length; 1031 dma_addr_t dma; 1032 unsigned stream_id = req->request.stream_id; 1033 unsigned short_not_ok = req->request.short_not_ok; 1034 unsigned no_interrupt = req->request.no_interrupt; 1035 1036 if (req->request.num_sgs > 0) { 1037 length = sg_dma_len(req->start_sg); 1038 dma = sg_dma_address(req->start_sg); 1039 } else { 1040 length = req->request.length; 1041 dma = req->request.dma; 1042 } 1043 1044 trb = &dep->trb_pool[dep->trb_enqueue]; 1045 1046 if (!req->trb) { 1047 dwc3_gadget_move_started_request(req); 1048 req->trb = trb; 1049 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 1050 } 1051 1052 req->num_trbs++; 1053 1054 __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, 1055 stream_id, short_not_ok, no_interrupt); 1056 } 1057 1058 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, 1059 struct dwc3_request *req) 1060 { 1061 struct scatterlist *sg = req->start_sg; 1062 struct scatterlist *s; 1063 int i; 1064 1065 unsigned int remaining = req->request.num_mapped_sgs 1066 - req->num_queued_sgs; 1067 1068 for_each_sg(sg, s, remaining, i) { 1069 unsigned int length = req->request.length; 1070 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1071 unsigned int rem = length % maxp; 1072 unsigned chain = true; 1073 1074 /* 1075 * IOMMU driver is coalescing the list of sgs which shares a 1076 * page boundary into one and giving it to USB driver. With 1077 * this the number of sgs mapped is not equal to the number of 1078 * sgs passed. So mark the chain bit to false if it isthe last 1079 * mapped sg. 1080 */ 1081 if (i == remaining - 1) 1082 chain = false; 1083 1084 if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { 1085 struct dwc3 *dwc = dep->dwc; 1086 struct dwc3_trb *trb; 1087 1088 req->needs_extra_trb = true; 1089 1090 /* prepare normal TRB */ 1091 dwc3_prepare_one_trb(dep, req, true, i); 1092 1093 /* Now prepare one extra TRB to align transfer size */ 1094 trb = &dep->trb_pool[dep->trb_enqueue]; 1095 req->num_trbs++; 1096 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 1097 maxp - rem, false, 1, 1098 req->request.stream_id, 1099 req->request.short_not_ok, 1100 req->request.no_interrupt); 1101 } else { 1102 dwc3_prepare_one_trb(dep, req, chain, i); 1103 } 1104 1105 /* 1106 * There can be a situation where all sgs in sglist are not 1107 * queued because of insufficient trb number. 
To handle this 1108 * case, update start_sg to next sg to be queued, so that 1109 * we have free trbs we can continue queuing from where we 1110 * previously stopped 1111 */ 1112 if (chain) 1113 req->start_sg = sg_next(s); 1114 1115 req->num_queued_sgs++; 1116 1117 if (!dwc3_calc_trbs_left(dep)) 1118 break; 1119 } 1120 } 1121 1122 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, 1123 struct dwc3_request *req) 1124 { 1125 unsigned int length = req->request.length; 1126 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1127 unsigned int rem = length % maxp; 1128 1129 if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { 1130 struct dwc3 *dwc = dep->dwc; 1131 struct dwc3_trb *trb; 1132 1133 req->needs_extra_trb = true; 1134 1135 /* prepare normal TRB */ 1136 dwc3_prepare_one_trb(dep, req, true, 0); 1137 1138 /* Now prepare one extra TRB to align transfer size */ 1139 trb = &dep->trb_pool[dep->trb_enqueue]; 1140 req->num_trbs++; 1141 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, 1142 false, 1, req->request.stream_id, 1143 req->request.short_not_ok, 1144 req->request.no_interrupt); 1145 } else if (req->request.zero && req->request.length && 1146 (IS_ALIGNED(req->request.length, maxp))) { 1147 struct dwc3 *dwc = dep->dwc; 1148 struct dwc3_trb *trb; 1149 1150 req->needs_extra_trb = true; 1151 1152 /* prepare normal TRB */ 1153 dwc3_prepare_one_trb(dep, req, true, 0); 1154 1155 /* Now prepare one extra TRB to handle ZLP */ 1156 trb = &dep->trb_pool[dep->trb_enqueue]; 1157 req->num_trbs++; 1158 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, 1159 false, 1, req->request.stream_id, 1160 req->request.short_not_ok, 1161 req->request.no_interrupt); 1162 } else { 1163 dwc3_prepare_one_trb(dep, req, false, 0); 1164 } 1165 } 1166 1167 /* 1168 * dwc3_prepare_trbs - setup TRBs from requests 1169 * @dep: endpoint for which requests are being prepared 1170 * 1171 * The function goes through the requests list and sets up TRBs for the 1172 * transfers. The function returns once there are no more TRBs available or 1173 * it runs out of requests. 1174 */ 1175 static void dwc3_prepare_trbs(struct dwc3_ep *dep) 1176 { 1177 struct dwc3_request *req, *n; 1178 1179 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 1180 1181 /* 1182 * We can get in a situation where there's a request in the started list 1183 * but there weren't enough TRBs to fully kick it in the first time 1184 * around, so it has been waiting for more TRBs to be freed up. 1185 * 1186 * In that case, we should check if we have a request with pending_sgs 1187 * in the started list and prepare TRBs for that request first, 1188 * otherwise we will prepare TRBs completely out of order and that will 1189 * break things. 
1190 */ 1191 list_for_each_entry(req, &dep->started_list, list) { 1192 if (req->num_pending_sgs > 0) 1193 dwc3_prepare_one_trb_sg(dep, req); 1194 1195 if (!dwc3_calc_trbs_left(dep)) 1196 return; 1197 } 1198 1199 list_for_each_entry_safe(req, n, &dep->pending_list, list) { 1200 struct dwc3 *dwc = dep->dwc; 1201 int ret; 1202 1203 ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, 1204 dep->direction); 1205 if (ret) 1206 return; 1207 1208 req->sg = req->request.sg; 1209 req->start_sg = req->sg; 1210 req->num_queued_sgs = 0; 1211 req->num_pending_sgs = req->request.num_mapped_sgs; 1212 1213 if (req->num_pending_sgs > 0) 1214 dwc3_prepare_one_trb_sg(dep, req); 1215 else 1216 dwc3_prepare_one_trb_linear(dep, req); 1217 1218 if (!dwc3_calc_trbs_left(dep)) 1219 return; 1220 } 1221 } 1222 1223 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) 1224 { 1225 struct dwc3_gadget_ep_cmd_params params; 1226 struct dwc3_request *req; 1227 int starting; 1228 int ret; 1229 u32 cmd; 1230 1231 if (!dwc3_calc_trbs_left(dep)) 1232 return 0; 1233 1234 starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED); 1235 1236 dwc3_prepare_trbs(dep); 1237 req = next_request(&dep->started_list); 1238 if (!req) { 1239 dep->flags |= DWC3_EP_PENDING_REQUEST; 1240 return 0; 1241 } 1242 1243 memset(¶ms, 0, sizeof(params)); 1244 1245 if (starting) { 1246 params.param0 = upper_32_bits(req->trb_dma); 1247 params.param1 = lower_32_bits(req->trb_dma); 1248 cmd = DWC3_DEPCMD_STARTTRANSFER; 1249 1250 if (dep->stream_capable) 1251 cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id); 1252 1253 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1254 cmd |= DWC3_DEPCMD_PARAM(dep->frame_number); 1255 } else { 1256 cmd = DWC3_DEPCMD_UPDATETRANSFER | 1257 DWC3_DEPCMD_PARAM(dep->resource_index); 1258 } 1259 1260 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1261 if (ret < 0) { 1262 /* 1263 * FIXME we need to iterate over the list of requests 1264 * here and stop, unmap, free and del each of the linked 1265 * requests instead of what we do now. 1266 */ 1267 if (req->trb) 1268 memset(req->trb, 0, sizeof(struct dwc3_trb)); 1269 dwc3_gadget_del_and_unmap_request(dep, req, ret); 1270 return ret; 1271 } 1272 1273 return 0; 1274 } 1275 1276 static int __dwc3_gadget_get_frame(struct dwc3 *dwc) 1277 { 1278 u32 reg; 1279 1280 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1281 return DWC3_DSTS_SOFFN(reg); 1282 } 1283 1284 /** 1285 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number 1286 * @dep: isoc endpoint 1287 * 1288 * This function tests for the correct combination of BIT[15:14] from the 16-bit 1289 * microframe number reported by the XferNotReady event for the future frame 1290 * number to start the isoc transfer. 1291 * 1292 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed 1293 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the 1294 * XferNotReady event are invalid. The driver uses this number to schedule the 1295 * isochronous transfer and passes it to the START TRANSFER command. Because 1296 * this number is invalid, the command may fail. If BIT[15:14] matches the 1297 * internal 16-bit microframe, the START TRANSFER command will pass and the 1298 * transfer will start at the scheduled time, if it is off by 1, the command 1299 * will still pass, but the transfer will start 2 seconds in the future. For all 1300 * other conditions, the START TRANSFER command will fail with bus-expiry. 
1301 * 1302 * In order to workaround this issue, we can test for the correct combination of 1303 * BIT[15:14] by sending START TRANSFER commands with different values of 1304 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart 1305 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status. 1306 * As the result, within the 4 possible combinations for BIT[15:14], there will 1307 * be 2 successful and 2 failure START COMMAND status. One of the 2 successful 1308 * command status will result in a 2-second delay start. The smaller BIT[15:14] 1309 * value is the correct combination. 1310 * 1311 * Since there are only 4 outcomes and the results are ordered, we can simply 1312 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to 1313 * deduce the smaller successful combination. 1314 * 1315 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01 1316 * of BIT[15:14]. The correct combination is as follow: 1317 * 1318 * if test0 fails and test1 passes, BIT[15:14] is 'b01 1319 * if test0 fails and test1 fails, BIT[15:14] is 'b10 1320 * if test0 passes and test1 fails, BIT[15:14] is 'b11 1321 * if test0 passes and test1 passes, BIT[15:14] is 'b00 1322 * 1323 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN 1324 * endpoints. 1325 */ 1326 static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep) 1327 { 1328 int cmd_status = 0; 1329 bool test0; 1330 bool test1; 1331 1332 while (dep->combo_num < 2) { 1333 struct dwc3_gadget_ep_cmd_params params; 1334 u32 test_frame_number; 1335 u32 cmd; 1336 1337 /* 1338 * Check if we can start isoc transfer on the next interval or 1339 * 4 uframes in the future with BIT[15:14] as dep->combo_num 1340 */ 1341 test_frame_number = dep->frame_number & 0x3fff; 1342 test_frame_number |= dep->combo_num << 14; 1343 test_frame_number += max_t(u32, 4, dep->interval); 1344 1345 params.param0 = upper_32_bits(dep->dwc->bounce_addr); 1346 params.param1 = lower_32_bits(dep->dwc->bounce_addr); 1347 1348 cmd = DWC3_DEPCMD_STARTTRANSFER; 1349 cmd |= DWC3_DEPCMD_PARAM(test_frame_number); 1350 cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 1351 1352 /* Redo if some other failure beside bus-expiry is received */ 1353 if (cmd_status && cmd_status != -EAGAIN) { 1354 dep->start_cmd_status = 0; 1355 dep->combo_num = 0; 1356 return 0; 1357 } 1358 1359 /* Store the first test status */ 1360 if (dep->combo_num == 0) 1361 dep->start_cmd_status = cmd_status; 1362 1363 dep->combo_num++; 1364 1365 /* 1366 * End the transfer if the START_TRANSFER command is successful 1367 * to wait for the next XferNotReady to test the command again 1368 */ 1369 if (cmd_status == 0) { 1370 dwc3_stop_active_transfer(dep, true, true); 1371 return 0; 1372 } 1373 } 1374 1375 /* test0 and test1 are both completed at this point */ 1376 test0 = (dep->start_cmd_status == 0); 1377 test1 = (cmd_status == 0); 1378 1379 if (!test0 && test1) 1380 dep->combo_num = 1; 1381 else if (!test0 && !test1) 1382 dep->combo_num = 2; 1383 else if (test0 && !test1) 1384 dep->combo_num = 3; 1385 else if (test0 && test1) 1386 dep->combo_num = 0; 1387 1388 dep->frame_number &= 0x3fff; 1389 dep->frame_number |= dep->combo_num << 14; 1390 dep->frame_number += max_t(u32, 4, dep->interval); 1391 1392 /* Reinitialize test variables */ 1393 dep->start_cmd_status = 0; 1394 dep->combo_num = 0; 1395 1396 return __dwc3_gadget_kick_transfer(dep); 1397 } 1398 1399 static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep) 1400 { 1401 
struct dwc3 *dwc = dep->dwc; 1402 int ret; 1403 int i; 1404 1405 if (list_empty(&dep->pending_list)) { 1406 dep->flags |= DWC3_EP_PENDING_REQUEST; 1407 return -EAGAIN; 1408 } 1409 1410 if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) && 1411 (dwc->revision <= DWC3_USB31_REVISION_160A || 1412 (dwc->revision == DWC3_USB31_REVISION_170A && 1413 dwc->version_type >= DWC31_VERSIONTYPE_EA01 && 1414 dwc->version_type <= DWC31_VERSIONTYPE_EA06))) { 1415 1416 if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction) 1417 return dwc3_gadget_start_isoc_quirk(dep); 1418 } 1419 1420 for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) { 1421 dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1); 1422 1423 ret = __dwc3_gadget_kick_transfer(dep); 1424 if (ret != -EAGAIN) 1425 break; 1426 } 1427 1428 return ret; 1429 } 1430 1431 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1432 { 1433 struct dwc3 *dwc = dep->dwc; 1434 1435 if (!dep->endpoint.desc) { 1436 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", 1437 dep->name); 1438 return -ESHUTDOWN; 1439 } 1440 1441 if (WARN(req->dep != dep, "request %pK belongs to '%s'\n", 1442 &req->request, req->dep->name)) 1443 return -EINVAL; 1444 1445 if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED, 1446 "%s: request %pK already in flight\n", 1447 dep->name, &req->request)) 1448 return -EINVAL; 1449 1450 pm_runtime_get(dwc->dev); 1451 1452 req->request.actual = 0; 1453 req->request.status = -EINPROGRESS; 1454 1455 trace_dwc3_ep_queue(req); 1456 1457 list_add_tail(&req->list, &dep->pending_list); 1458 req->status = DWC3_REQUEST_STATUS_QUEUED; 1459 1460 /* Start the transfer only after the END_TRANSFER is completed */ 1461 if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { 1462 dep->flags |= DWC3_EP_DELAY_START; 1463 return 0; 1464 } 1465 1466 /* 1467 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must 1468 * wait for a XferNotReady event so we will know what's the current 1469 * (micro-)frame number. 1470 * 1471 * Without this trick, we are very, very likely gonna get Bus Expiry 1472 * errors which will force us issue EndTransfer command. 1473 */ 1474 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1475 if (!(dep->flags & DWC3_EP_PENDING_REQUEST) && 1476 !(dep->flags & DWC3_EP_TRANSFER_STARTED)) 1477 return 0; 1478 1479 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { 1480 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) { 1481 return __dwc3_gadget_start_isoc(dep); 1482 } 1483 } 1484 } 1485 1486 return __dwc3_gadget_kick_transfer(dep); 1487 } 1488 1489 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1490 gfp_t gfp_flags) 1491 { 1492 struct dwc3_request *req = to_dwc3_request(request); 1493 struct dwc3_ep *dep = to_dwc3_ep(ep); 1494 struct dwc3 *dwc = dep->dwc; 1495 1496 unsigned long flags; 1497 1498 int ret; 1499 1500 spin_lock_irqsave(&dwc->lock, flags); 1501 ret = __dwc3_gadget_ep_queue(dep, req); 1502 spin_unlock_irqrestore(&dwc->lock, flags); 1503 1504 return ret; 1505 } 1506 1507 static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req) 1508 { 1509 int i; 1510 1511 /* 1512 * If request was already started, this means we had to 1513 * stop the transfer. With that we also need to ignore 1514 * all TRBs used by the request, however TRBs can only 1515 * be modified after completion of END_TRANSFER 1516 * command. 
So what we do here is that we wait for 1517 * END_TRANSFER completion and only after that, we jump 1518 * over TRBs by clearing HWO and incrementing dequeue 1519 * pointer. 1520 */ 1521 for (i = 0; i < req->num_trbs; i++) { 1522 struct dwc3_trb *trb; 1523 1524 trb = &dep->trb_pool[dep->trb_dequeue]; 1525 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 1526 dwc3_ep_inc_deq(dep); 1527 } 1528 1529 req->num_trbs = 0; 1530 } 1531 1532 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep) 1533 { 1534 struct dwc3_request *req; 1535 struct dwc3_request *tmp; 1536 1537 list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) { 1538 dwc3_gadget_ep_skip_trbs(dep, req); 1539 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1540 } 1541 } 1542 1543 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1544 struct usb_request *request) 1545 { 1546 struct dwc3_request *req = to_dwc3_request(request); 1547 struct dwc3_request *r = NULL; 1548 1549 struct dwc3_ep *dep = to_dwc3_ep(ep); 1550 struct dwc3 *dwc = dep->dwc; 1551 1552 unsigned long flags; 1553 int ret = 0; 1554 1555 trace_dwc3_ep_dequeue(req); 1556 1557 spin_lock_irqsave(&dwc->lock, flags); 1558 1559 list_for_each_entry(r, &dep->pending_list, list) { 1560 if (r == req) 1561 break; 1562 } 1563 1564 if (r != req) { 1565 list_for_each_entry(r, &dep->started_list, list) { 1566 if (r == req) 1567 break; 1568 } 1569 if (r == req) { 1570 /* wait until it is processed */ 1571 dwc3_stop_active_transfer(dep, true, true); 1572 1573 if (!r->trb) 1574 goto out0; 1575 1576 dwc3_gadget_move_cancelled_request(req); 1577 if (dep->flags & DWC3_EP_TRANSFER_STARTED) 1578 goto out0; 1579 else 1580 goto out1; 1581 } 1582 dev_err(dwc->dev, "request %pK was not queued to %s\n", 1583 request, ep->name); 1584 ret = -EINVAL; 1585 goto out0; 1586 } 1587 1588 out1: 1589 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1590 1591 out0: 1592 spin_unlock_irqrestore(&dwc->lock, flags); 1593 1594 return ret; 1595 } 1596 1597 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1598 { 1599 struct dwc3_gadget_ep_cmd_params params; 1600 struct dwc3 *dwc = dep->dwc; 1601 int ret; 1602 1603 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1604 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1605 return -EINVAL; 1606 } 1607 1608 memset(¶ms, 0x00, sizeof(params)); 1609 1610 if (value) { 1611 struct dwc3_trb *trb; 1612 1613 unsigned transfer_in_flight; 1614 unsigned started; 1615 1616 if (dep->number > 1) 1617 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1618 else 1619 trb = &dwc->ep0_trb[dep->trb_enqueue]; 1620 1621 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; 1622 started = !list_empty(&dep->started_list); 1623 1624 if (!protocol && ((dep->direction && transfer_in_flight) || 1625 (!dep->direction && started))) { 1626 return -EAGAIN; 1627 } 1628 1629 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, 1630 ¶ms); 1631 if (ret) 1632 dev_err(dwc->dev, "failed to set STALL on %s\n", 1633 dep->name); 1634 else 1635 dep->flags |= DWC3_EP_STALL; 1636 } else { 1637 1638 ret = dwc3_send_clear_stall_ep_cmd(dep); 1639 if (ret) 1640 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1641 dep->name); 1642 else 1643 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1644 } 1645 1646 return ret; 1647 } 1648 1649 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1650 { 1651 struct dwc3_ep *dep = to_dwc3_ep(ep); 1652 struct dwc3 *dwc = dep->dwc; 1653 1654 unsigned long flags; 1655 1656 int ret; 1657 1658 spin_lock_irqsave(&dwc->lock, 
flags); 1659 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1660 spin_unlock_irqrestore(&dwc->lock, flags); 1661 1662 return ret; 1663 } 1664 1665 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1666 { 1667 struct dwc3_ep *dep = to_dwc3_ep(ep); 1668 struct dwc3 *dwc = dep->dwc; 1669 unsigned long flags; 1670 int ret; 1671 1672 spin_lock_irqsave(&dwc->lock, flags); 1673 dep->flags |= DWC3_EP_WEDGE; 1674 1675 if (dep->number == 0 || dep->number == 1) 1676 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1677 else 1678 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1679 spin_unlock_irqrestore(&dwc->lock, flags); 1680 1681 return ret; 1682 } 1683 1684 /* -------------------------------------------------------------------------- */ 1685 1686 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1687 .bLength = USB_DT_ENDPOINT_SIZE, 1688 .bDescriptorType = USB_DT_ENDPOINT, 1689 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1690 }; 1691 1692 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1693 .enable = dwc3_gadget_ep0_enable, 1694 .disable = dwc3_gadget_ep0_disable, 1695 .alloc_request = dwc3_gadget_ep_alloc_request, 1696 .free_request = dwc3_gadget_ep_free_request, 1697 .queue = dwc3_gadget_ep0_queue, 1698 .dequeue = dwc3_gadget_ep_dequeue, 1699 .set_halt = dwc3_gadget_ep0_set_halt, 1700 .set_wedge = dwc3_gadget_ep_set_wedge, 1701 }; 1702 1703 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1704 .enable = dwc3_gadget_ep_enable, 1705 .disable = dwc3_gadget_ep_disable, 1706 .alloc_request = dwc3_gadget_ep_alloc_request, 1707 .free_request = dwc3_gadget_ep_free_request, 1708 .queue = dwc3_gadget_ep_queue, 1709 .dequeue = dwc3_gadget_ep_dequeue, 1710 .set_halt = dwc3_gadget_ep_set_halt, 1711 .set_wedge = dwc3_gadget_ep_set_wedge, 1712 }; 1713 1714 /* -------------------------------------------------------------------------- */ 1715 1716 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1717 { 1718 struct dwc3 *dwc = gadget_to_dwc(g); 1719 1720 return __dwc3_gadget_get_frame(dwc); 1721 } 1722 1723 static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1724 { 1725 int retries; 1726 1727 int ret; 1728 u32 reg; 1729 1730 u8 link_state; 1731 u8 speed; 1732 1733 /* 1734 * According to the Databook Remote wakeup request should 1735 * be issued only when the device is in early suspend state. 1736 * 1737 * We can check that via USB Link State bits in DSTS register. 
1738 */ 1739 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1740 1741 speed = reg & DWC3_DSTS_CONNECTSPD; 1742 if ((speed == DWC3_DSTS_SUPERSPEED) || 1743 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) 1744 return 0; 1745 1746 link_state = DWC3_DSTS_USBLNKST(reg); 1747 1748 switch (link_state) { 1749 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1750 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1751 break; 1752 default: 1753 return -EINVAL; 1754 } 1755 1756 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1757 if (ret < 0) { 1758 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1759 return ret; 1760 } 1761 1762 /* Recent versions do this automatically */ 1763 if (dwc->revision < DWC3_REVISION_194A) { 1764 /* write zeroes to Link Change Request */ 1765 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1766 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1767 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1768 } 1769 1770 /* poll until Link State changes to ON */ 1771 retries = 20000; 1772 1773 while (retries--) { 1774 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1775 1776 /* in HS, means ON */ 1777 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1778 break; 1779 } 1780 1781 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1782 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1783 return -EINVAL; 1784 } 1785 1786 return 0; 1787 } 1788 1789 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1790 { 1791 struct dwc3 *dwc = gadget_to_dwc(g); 1792 unsigned long flags; 1793 int ret; 1794 1795 spin_lock_irqsave(&dwc->lock, flags); 1796 ret = __dwc3_gadget_wakeup(dwc); 1797 spin_unlock_irqrestore(&dwc->lock, flags); 1798 1799 return ret; 1800 } 1801 1802 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1803 int is_selfpowered) 1804 { 1805 struct dwc3 *dwc = gadget_to_dwc(g); 1806 unsigned long flags; 1807 1808 spin_lock_irqsave(&dwc->lock, flags); 1809 g->is_selfpowered = !!is_selfpowered; 1810 spin_unlock_irqrestore(&dwc->lock, flags); 1811 1812 return 0; 1813 } 1814 1815 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1816 { 1817 u32 reg; 1818 u32 timeout = 500; 1819 1820 if (pm_runtime_suspended(dwc->dev)) 1821 return 0; 1822 1823 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1824 if (is_on) { 1825 if (dwc->revision <= DWC3_REVISION_187A) { 1826 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1827 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1828 } 1829 1830 if (dwc->revision >= DWC3_REVISION_194A) 1831 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1832 reg |= DWC3_DCTL_RUN_STOP; 1833 1834 if (dwc->has_hibernation) 1835 reg |= DWC3_DCTL_KEEP_CONNECT; 1836 1837 dwc->pullups_connected = true; 1838 } else { 1839 reg &= ~DWC3_DCTL_RUN_STOP; 1840 1841 if (dwc->has_hibernation && !suspend) 1842 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1843 1844 dwc->pullups_connected = false; 1845 } 1846 1847 dwc3_gadget_dctl_write_safe(dwc, reg); 1848 1849 do { 1850 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1851 reg &= DWC3_DSTS_DEVCTRLHLT; 1852 } while (--timeout && !(!is_on ^ !reg)); 1853 1854 if (!timeout) 1855 return -ETIMEDOUT; 1856 1857 return 0; 1858 } 1859 1860 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1861 { 1862 struct dwc3 *dwc = gadget_to_dwc(g); 1863 unsigned long flags; 1864 int ret; 1865 1866 is_on = !!is_on; 1867 1868 /* 1869 * Per databook, when we want to stop the gadget, if a control transfer 1870 * is still in process, complete it and get the core into setup phase. 
1871 */ 1872 if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { 1873 reinit_completion(&dwc->ep0_in_setup); 1874 1875 ret = wait_for_completion_timeout(&dwc->ep0_in_setup, 1876 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); 1877 if (ret == 0) { 1878 dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); 1879 return -ETIMEDOUT; 1880 } 1881 } 1882 1883 spin_lock_irqsave(&dwc->lock, flags); 1884 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1885 spin_unlock_irqrestore(&dwc->lock, flags); 1886 1887 return ret; 1888 } 1889 1890 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1891 { 1892 u32 reg; 1893 1894 /* Enable all but Start and End of Frame IRQs */ 1895 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1896 DWC3_DEVTEN_EVNTOVERFLOWEN | 1897 DWC3_DEVTEN_CMDCMPLTEN | 1898 DWC3_DEVTEN_ERRTICERREN | 1899 DWC3_DEVTEN_WKUPEVTEN | 1900 DWC3_DEVTEN_CONNECTDONEEN | 1901 DWC3_DEVTEN_USBRSTEN | 1902 DWC3_DEVTEN_DISCONNEVTEN); 1903 1904 if (dwc->revision < DWC3_REVISION_250A) 1905 reg |= DWC3_DEVTEN_ULSTCNGEN; 1906 1907 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1908 } 1909 1910 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1911 { 1912 /* mask all interrupts */ 1913 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1914 } 1915 1916 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1917 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1918 1919 /** 1920 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG 1921 * @dwc: pointer to our context structure 1922 * 1923 * The following looks like complex but it's actually very simple. In order to 1924 * calculate the number of packets we can burst at once on OUT transfers, we're 1925 * gonna use RxFIFO size. 1926 * 1927 * To calculate RxFIFO size we need two numbers: 1928 * MDWIDTH = size, in bits, of the internal memory bus 1929 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) 1930 * 1931 * Given these two numbers, the formula is simple: 1932 * 1933 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; 1934 * 1935 * 24 bytes is for 3x SETUP packets 1936 * 16 bytes is a clock domain crossing tolerance 1937 * 1938 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; 1939 */ 1940 static void dwc3_gadget_setup_nump(struct dwc3 *dwc) 1941 { 1942 u32 ram2_depth; 1943 u32 mdwidth; 1944 u32 nump; 1945 u32 reg; 1946 1947 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); 1948 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); 1949 1950 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; 1951 nump = min_t(u32, nump, 16); 1952 1953 /* update NumP */ 1954 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1955 reg &= ~DWC3_DCFG_NUMP_MASK; 1956 reg |= nump << DWC3_DCFG_NUMP_SHIFT; 1957 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1958 } 1959 1960 static int __dwc3_gadget_start(struct dwc3 *dwc) 1961 { 1962 struct dwc3_ep *dep; 1963 int ret = 0; 1964 u32 reg; 1965 1966 /* 1967 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if 1968 * the core supports IMOD, disable it. 1969 */ 1970 if (dwc->imod_interval) { 1971 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 1972 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 1973 } else if (dwc3_has_imod(dwc)) { 1974 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); 1975 } 1976 1977 /* 1978 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1979 * field instead of letting dwc3 itself calculate that automatically. 
1980 * 1981 * This way, we maximize the chances that we'll be able to get several 1982 * bursts of data without going through any sort of endpoint throttling. 1983 */ 1984 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1985 if (dwc3_is_usb31(dwc)) 1986 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL; 1987 else 1988 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1989 1990 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1991 1992 dwc3_gadget_setup_nump(dwc); 1993 1994 /* Start with SuperSpeed Default */ 1995 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1996 1997 dep = dwc->eps[0]; 1998 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1999 if (ret) { 2000 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2001 goto err0; 2002 } 2003 2004 dep = dwc->eps[1]; 2005 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 2006 if (ret) { 2007 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2008 goto err1; 2009 } 2010 2011 /* begin to receive SETUP packets */ 2012 dwc->ep0state = EP0_SETUP_PHASE; 2013 dwc->link_state = DWC3_LINK_STATE_SS_DIS; 2014 dwc3_ep0_out_start(dwc); 2015 2016 dwc3_gadget_enable_irq(dwc); 2017 2018 return 0; 2019 2020 err1: 2021 __dwc3_gadget_ep_disable(dwc->eps[0]); 2022 2023 err0: 2024 return ret; 2025 } 2026 2027 static int dwc3_gadget_start(struct usb_gadget *g, 2028 struct usb_gadget_driver *driver) 2029 { 2030 struct dwc3 *dwc = gadget_to_dwc(g); 2031 unsigned long flags; 2032 int ret = 0; 2033 int irq; 2034 2035 irq = dwc->irq_gadget; 2036 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 2037 IRQF_SHARED, "dwc3", dwc->ev_buf); 2038 if (ret) { 2039 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 2040 irq, ret); 2041 goto err0; 2042 } 2043 2044 spin_lock_irqsave(&dwc->lock, flags); 2045 if (dwc->gadget_driver) { 2046 dev_err(dwc->dev, "%s is already bound to %s\n", 2047 dwc->gadget.name, 2048 dwc->gadget_driver->driver.name); 2049 ret = -EBUSY; 2050 goto err1; 2051 } 2052 2053 dwc->gadget_driver = driver; 2054 2055 if (pm_runtime_active(dwc->dev)) 2056 __dwc3_gadget_start(dwc); 2057 2058 spin_unlock_irqrestore(&dwc->lock, flags); 2059 2060 return 0; 2061 2062 err1: 2063 spin_unlock_irqrestore(&dwc->lock, flags); 2064 free_irq(irq, dwc); 2065 2066 err0: 2067 return ret; 2068 } 2069 2070 static void __dwc3_gadget_stop(struct dwc3 *dwc) 2071 { 2072 dwc3_gadget_disable_irq(dwc); 2073 __dwc3_gadget_ep_disable(dwc->eps[0]); 2074 __dwc3_gadget_ep_disable(dwc->eps[1]); 2075 } 2076 2077 static int dwc3_gadget_stop(struct usb_gadget *g) 2078 { 2079 struct dwc3 *dwc = gadget_to_dwc(g); 2080 unsigned long flags; 2081 2082 spin_lock_irqsave(&dwc->lock, flags); 2083 2084 if (pm_runtime_suspended(dwc->dev)) 2085 goto out; 2086 2087 __dwc3_gadget_stop(dwc); 2088 2089 out: 2090 dwc->gadget_driver = NULL; 2091 spin_unlock_irqrestore(&dwc->lock, flags); 2092 2093 free_irq(dwc->irq_gadget, dwc->ev_buf); 2094 2095 return 0; 2096 } 2097 2098 static void dwc3_gadget_config_params(struct usb_gadget *g, 2099 struct usb_dcd_config_params *params) 2100 { 2101 struct dwc3 *dwc = gadget_to_dwc(g); 2102 2103 params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED; 2104 params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED; 2105 2106 /* Recommended BESL */ 2107 if (!dwc->dis_enblslpm_quirk) { 2108 /* 2109 * If the recommended BESL baseline is 0 or if the BESL deep is 2110 * less than 2, Microsoft's Windows 10 host usb stack will issue 2111 * a usb reset immediately after it receives the extended BOS 2112 * descriptor and the enumeration will fail. 
To maintain 2113 * compatibility with the Windows' usb stack, let's set the 2114 * recommended BESL baseline to 1 and clamp the BESL deep to be 2115 * within 2 to 15. 2116 */ 2117 params->besl_baseline = 1; 2118 if (dwc->is_utmi_l1_suspend) 2119 params->besl_deep = 2120 clamp_t(u8, dwc->hird_threshold, 2, 15); 2121 } 2122 2123 /* U1 Device exit Latency */ 2124 if (dwc->dis_u1_entry_quirk) 2125 params->bU1devExitLat = 0; 2126 else 2127 params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT; 2128 2129 /* U2 Device exit Latency */ 2130 if (dwc->dis_u2_entry_quirk) 2131 params->bU2DevExitLat = 0; 2132 else 2133 params->bU2DevExitLat = 2134 cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT); 2135 } 2136 2137 static void dwc3_gadget_set_speed(struct usb_gadget *g, 2138 enum usb_device_speed speed) 2139 { 2140 struct dwc3 *dwc = gadget_to_dwc(g); 2141 unsigned long flags; 2142 u32 reg; 2143 2144 spin_lock_irqsave(&dwc->lock, flags); 2145 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2146 reg &= ~(DWC3_DCFG_SPEED_MASK); 2147 2148 /* 2149 * WORKAROUND: DWC3 revision < 2.20a have an issue 2150 * which would cause metastability state on Run/Stop 2151 * bit if we try to force the IP to USB2-only mode. 2152 * 2153 * Because of that, we cannot configure the IP to any 2154 * speed other than the SuperSpeed 2155 * 2156 * Refers to: 2157 * 2158 * STAR#9000525659: Clock Domain Crossing on DCTL in 2159 * USB 2.0 Mode 2160 */ 2161 if (dwc->revision < DWC3_REVISION_220A && 2162 !dwc->dis_metastability_quirk) { 2163 reg |= DWC3_DCFG_SUPERSPEED; 2164 } else { 2165 switch (speed) { 2166 case USB_SPEED_LOW: 2167 reg |= DWC3_DCFG_LOWSPEED; 2168 break; 2169 case USB_SPEED_FULL: 2170 reg |= DWC3_DCFG_FULLSPEED; 2171 break; 2172 case USB_SPEED_HIGH: 2173 reg |= DWC3_DCFG_HIGHSPEED; 2174 break; 2175 case USB_SPEED_SUPER: 2176 reg |= DWC3_DCFG_SUPERSPEED; 2177 break; 2178 case USB_SPEED_SUPER_PLUS: 2179 if (dwc3_is_usb31(dwc)) 2180 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2181 else 2182 reg |= DWC3_DCFG_SUPERSPEED; 2183 break; 2184 default: 2185 dev_err(dwc->dev, "invalid speed (%d)\n", speed); 2186 2187 if (dwc->revision & DWC3_REVISION_IS_DWC31) 2188 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2189 else 2190 reg |= DWC3_DCFG_SUPERSPEED; 2191 } 2192 } 2193 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2194 2195 spin_unlock_irqrestore(&dwc->lock, flags); 2196 } 2197 2198 static const struct usb_gadget_ops dwc3_gadget_ops = { 2199 .get_frame = dwc3_gadget_get_frame, 2200 .wakeup = dwc3_gadget_wakeup, 2201 .set_selfpowered = dwc3_gadget_set_selfpowered, 2202 .pullup = dwc3_gadget_pullup, 2203 .udc_start = dwc3_gadget_start, 2204 .udc_stop = dwc3_gadget_stop, 2205 .udc_set_speed = dwc3_gadget_set_speed, 2206 .get_config_params = dwc3_gadget_config_params, 2207 }; 2208 2209 /* -------------------------------------------------------------------------- */ 2210 2211 static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep) 2212 { 2213 struct dwc3 *dwc = dep->dwc; 2214 2215 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2216 dep->endpoint.maxburst = 1; 2217 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2218 if (!dep->direction) 2219 dwc->gadget.ep0 = &dep->endpoint; 2220 2221 dep->endpoint.caps.type_control = true; 2222 2223 return 0; 2224 } 2225 2226 static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) 2227 { 2228 struct dwc3 *dwc = dep->dwc; 2229 int mdwidth; 2230 int kbytes; 2231 int size; 2232 2233 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2234 /* MDWIDTH is represented in bits, we need it in bytes */ 2235 mdwidth /= 8; 2236 2237 size = 
dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1)); 2238 if (dwc3_is_usb31(dwc)) 2239 size = DWC31_GTXFIFOSIZ_TXFDEF(size); 2240 else 2241 size = DWC3_GTXFIFOSIZ_TXFDEF(size); 2242 2243 /* FIFO Depth is in MDWDITH bytes. Multiply */ 2244 size *= mdwidth; 2245 2246 kbytes = size / 1024; 2247 if (kbytes == 0) 2248 kbytes = 1; 2249 2250 /* 2251 * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for 2252 * internal overhead. We don't really know how these are used, 2253 * but documentation say it exists. 2254 */ 2255 size -= mdwidth * (kbytes + 1); 2256 size /= kbytes; 2257 2258 usb_ep_set_maxpacket_limit(&dep->endpoint, size); 2259 2260 dep->endpoint.max_streams = 15; 2261 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2262 list_add_tail(&dep->endpoint.ep_list, 2263 &dwc->gadget.ep_list); 2264 dep->endpoint.caps.type_iso = true; 2265 dep->endpoint.caps.type_bulk = true; 2266 dep->endpoint.caps.type_int = true; 2267 2268 return dwc3_alloc_trb_pool(dep); 2269 } 2270 2271 static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep) 2272 { 2273 struct dwc3 *dwc = dep->dwc; 2274 2275 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 2276 dep->endpoint.max_streams = 15; 2277 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2278 list_add_tail(&dep->endpoint.ep_list, 2279 &dwc->gadget.ep_list); 2280 dep->endpoint.caps.type_iso = true; 2281 dep->endpoint.caps.type_bulk = true; 2282 dep->endpoint.caps.type_int = true; 2283 2284 return dwc3_alloc_trb_pool(dep); 2285 } 2286 2287 static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum) 2288 { 2289 struct dwc3_ep *dep; 2290 bool direction = epnum & 1; 2291 int ret; 2292 u8 num = epnum >> 1; 2293 2294 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 2295 if (!dep) 2296 return -ENOMEM; 2297 2298 dep->dwc = dwc; 2299 dep->number = epnum; 2300 dep->direction = direction; 2301 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 2302 dwc->eps[epnum] = dep; 2303 dep->combo_num = 0; 2304 dep->start_cmd_status = 0; 2305 2306 snprintf(dep->name, sizeof(dep->name), "ep%u%s", num, 2307 direction ? "in" : "out"); 2308 2309 dep->endpoint.name = dep->name; 2310 2311 if (!(dep->number > 1)) { 2312 dep->endpoint.desc = &dwc3_gadget_ep0_desc; 2313 dep->endpoint.comp_desc = NULL; 2314 } 2315 2316 if (num == 0) 2317 ret = dwc3_gadget_init_control_endpoint(dep); 2318 else if (direction) 2319 ret = dwc3_gadget_init_in_endpoint(dep); 2320 else 2321 ret = dwc3_gadget_init_out_endpoint(dep); 2322 2323 if (ret) 2324 return ret; 2325 2326 dep->endpoint.caps.dir_in = direction; 2327 dep->endpoint.caps.dir_out = !direction; 2328 2329 INIT_LIST_HEAD(&dep->pending_list); 2330 INIT_LIST_HEAD(&dep->started_list); 2331 INIT_LIST_HEAD(&dep->cancelled_list); 2332 2333 return 0; 2334 } 2335 2336 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total) 2337 { 2338 u8 epnum; 2339 2340 INIT_LIST_HEAD(&dwc->gadget.ep_list); 2341 2342 for (epnum = 0; epnum < total; epnum++) { 2343 int ret; 2344 2345 ret = dwc3_gadget_init_endpoint(dwc, epnum); 2346 if (ret) 2347 return ret; 2348 } 2349 2350 return 0; 2351 } 2352 2353 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 2354 { 2355 struct dwc3_ep *dep; 2356 u8 epnum; 2357 2358 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2359 dep = dwc->eps[epnum]; 2360 if (!dep) 2361 continue; 2362 /* 2363 * Physical endpoints 0 and 1 are special; they form the 2364 * bi-directional USB endpoint 0. 2365 * 2366 * For those two physical endpoints, we don't allocate a TRB 2367 * pool nor do we add them the endpoints list. 
Due to that, we
2368 * shouldn't do these two operations; otherwise we would end up
2369 * with all sorts of bugs when removing dwc3.ko.
2370 */
2371 if (epnum != 0 && epnum != 1) {
2372 dwc3_free_trb_pool(dep);
2373 list_del(&dep->endpoint.ep_list);
2374 }
2375
2376 kfree(dep);
2377 }
2378 }
2379
2380 /* -------------------------------------------------------------------------- */
2381
2382 static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
2383 struct dwc3_request *req, struct dwc3_trb *trb,
2384 const struct dwc3_event_depevt *event, int status, int chain)
2385 {
2386 unsigned int count;
2387
2388 dwc3_ep_inc_deq(dep);
2389
2390 trace_dwc3_complete_trb(dep, trb);
2391 req->num_trbs--;
2392
2393 /*
2394 * If we're in the middle of a series of chained TRBs and we
2395 * receive a short transfer along the way, DWC3 will skip
2396 * through all TRBs including the last TRB in the chain (the
2397 * one where the CHN bit is zero). DWC3 will also avoid clearing
2398 * the HWO bit, and SW has to do it manually.
2399 *
2400 * We're going to do that here to avoid problems of HW trying
2401 * to use bogus TRBs for transfers.
2402 */
2403 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
2404 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2405
2406 /*
2407 * For isochronous transfers, the first TRB in a service interval must
2408 * have the Isoc-First type. Track and report its interval frame number.
2409 */
2410 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2411 (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
2412 unsigned int frame_number;
2413
2414 frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
2415 frame_number &= ~(dep->interval - 1);
2416 req->request.frame_number = frame_number;
2417 }
2418
2419 /*
2420 * If we're dealing with an unaligned-size OUT transfer, we will be left
2421 * with one TRB pending in the ring. We need to manually clear the HWO bit
2422 * from that TRB.
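 *
 * (Illustrative, with made-up numbers: a 500-byte OUT request on a
 * 512-byte bulk endpoint gets an extra chained TRB pointing at the
 * bounce buffer so the total TRB size stays wMaxPacketSize-aligned;
 * depending on how much data the host actually sends, that extra TRB
 * can be left with HWO still set, hence the manual clear below.)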
2423 */ 2424 2425 if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { 2426 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2427 return 1; 2428 } 2429 2430 count = trb->size & DWC3_TRB_SIZE_MASK; 2431 req->remaining += count; 2432 2433 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2434 return 1; 2435 2436 if (event->status & DEPEVT_STATUS_SHORT && !chain) 2437 return 1; 2438 2439 if ((trb->ctrl & DWC3_TRB_CTRL_IOC) || 2440 (trb->ctrl & DWC3_TRB_CTRL_LST)) 2441 return 1; 2442 2443 return 0; 2444 } 2445 2446 static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, 2447 struct dwc3_request *req, const struct dwc3_event_depevt *event, 2448 int status) 2449 { 2450 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2451 struct scatterlist *sg = req->sg; 2452 struct scatterlist *s; 2453 unsigned int pending = req->num_pending_sgs; 2454 unsigned int i; 2455 int ret = 0; 2456 2457 for_each_sg(sg, s, pending, i) { 2458 trb = &dep->trb_pool[dep->trb_dequeue]; 2459 2460 if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2461 break; 2462 2463 req->sg = sg_next(s); 2464 req->num_pending_sgs--; 2465 2466 ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req, 2467 trb, event, status, true); 2468 if (ret) 2469 break; 2470 } 2471 2472 return ret; 2473 } 2474 2475 static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, 2476 struct dwc3_request *req, const struct dwc3_event_depevt *event, 2477 int status) 2478 { 2479 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; 2480 2481 return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb, 2482 event, status, false); 2483 } 2484 2485 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) 2486 { 2487 /* 2488 * For OUT direction, host may send less than the setup 2489 * length. Return true for all OUT requests. 
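 *
 * For IN, completion simply means every byte was moved; e.g. a
 * 3000-byte IN request only counts as done once request.actual has
 * reached 3000, otherwise the caller kicks the transfer again.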
2490 */ 2491 if (!req->direction) 2492 return true; 2493 2494 return req->request.actual == req->request.length; 2495 } 2496 2497 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, 2498 const struct dwc3_event_depevt *event, 2499 struct dwc3_request *req, int status) 2500 { 2501 int ret; 2502 2503 if (req->num_pending_sgs) 2504 ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, 2505 status); 2506 else 2507 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2508 status); 2509 2510 if (req->needs_extra_trb) { 2511 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, 2512 status); 2513 req->needs_extra_trb = false; 2514 } 2515 2516 req->request.actual = req->request.length - req->remaining; 2517 2518 if (!dwc3_gadget_ep_request_completed(req) || 2519 req->num_pending_sgs) { 2520 __dwc3_gadget_kick_transfer(dep); 2521 goto out; 2522 } 2523 2524 dwc3_gadget_giveback(dep, req, status); 2525 2526 out: 2527 return ret; 2528 } 2529 2530 static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep, 2531 const struct dwc3_event_depevt *event, int status) 2532 { 2533 struct dwc3_request *req; 2534 struct dwc3_request *tmp; 2535 2536 list_for_each_entry_safe(req, tmp, &dep->started_list, list) { 2537 int ret; 2538 2539 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event, 2540 req, status); 2541 if (ret) 2542 break; 2543 } 2544 } 2545 2546 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep, 2547 const struct dwc3_event_depevt *event) 2548 { 2549 dep->frame_number = event->parameters; 2550 } 2551 2552 static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, 2553 const struct dwc3_event_depevt *event) 2554 { 2555 struct dwc3 *dwc = dep->dwc; 2556 unsigned status = 0; 2557 bool stop = false; 2558 2559 dwc3_gadget_endpoint_frame_from_event(dep, event); 2560 2561 if (event->status & DEPEVT_STATUS_BUSERR) 2562 status = -ECONNRESET; 2563 2564 if (event->status & DEPEVT_STATUS_MISSED_ISOC) { 2565 status = -EXDEV; 2566 2567 if (list_empty(&dep->started_list)) 2568 stop = true; 2569 } 2570 2571 dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); 2572 2573 if (stop) 2574 dwc3_stop_active_transfer(dep, true, true); 2575 2576 /* 2577 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2578 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 
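 *
 * Concretely: once no enabled endpoint has anything left on its
 * started_list, the U1/U2 enable bits stashed in dwc->u1u2 by the
 * first half are OR'ed back into DCTL and the cached value is cleared.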
2579 */ 2580 if (dwc->revision < DWC3_REVISION_183A) { 2581 u32 reg; 2582 int i; 2583 2584 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2585 dep = dwc->eps[i]; 2586 2587 if (!(dep->flags & DWC3_EP_ENABLED)) 2588 continue; 2589 2590 if (!list_empty(&dep->started_list)) 2591 return; 2592 } 2593 2594 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2595 reg |= dwc->u1u2; 2596 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2597 2598 dwc->u1u2 = 0; 2599 } 2600 } 2601 2602 static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, 2603 const struct dwc3_event_depevt *event) 2604 { 2605 dwc3_gadget_endpoint_frame_from_event(dep, event); 2606 (void) __dwc3_gadget_start_isoc(dep); 2607 } 2608 2609 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2610 const struct dwc3_event_depevt *event) 2611 { 2612 struct dwc3_ep *dep; 2613 u8 epnum = event->endpoint_number; 2614 u8 cmd; 2615 2616 dep = dwc->eps[epnum]; 2617 2618 if (!(dep->flags & DWC3_EP_ENABLED)) { 2619 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) 2620 return; 2621 2622 /* Handle only EPCMDCMPLT when EP disabled */ 2623 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) 2624 return; 2625 } 2626 2627 if (epnum == 0 || epnum == 1) { 2628 dwc3_ep0_interrupt(dwc, event); 2629 return; 2630 } 2631 2632 switch (event->endpoint_event) { 2633 case DWC3_DEPEVT_XFERINPROGRESS: 2634 dwc3_gadget_endpoint_transfer_in_progress(dep, event); 2635 break; 2636 case DWC3_DEPEVT_XFERNOTREADY: 2637 dwc3_gadget_endpoint_transfer_not_ready(dep, event); 2638 break; 2639 case DWC3_DEPEVT_EPCMDCMPLT: 2640 cmd = DEPEVT_PARAMETER_CMD(event->parameters); 2641 2642 if (cmd == DWC3_DEPCMD_ENDTRANSFER) { 2643 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 2644 dep->flags &= ~DWC3_EP_TRANSFER_STARTED; 2645 dwc3_gadget_ep_cleanup_cancelled_requests(dep); 2646 if ((dep->flags & DWC3_EP_DELAY_START) && 2647 !usb_endpoint_xfer_isoc(dep->endpoint.desc)) 2648 __dwc3_gadget_kick_transfer(dep); 2649 2650 dep->flags &= ~DWC3_EP_DELAY_START; 2651 } 2652 break; 2653 case DWC3_DEPEVT_STREAMEVT: 2654 case DWC3_DEPEVT_XFERCOMPLETE: 2655 case DWC3_DEPEVT_RXTXFIFOEVT: 2656 break; 2657 } 2658 } 2659 2660 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2661 { 2662 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2663 spin_unlock(&dwc->lock); 2664 dwc->gadget_driver->disconnect(&dwc->gadget); 2665 spin_lock(&dwc->lock); 2666 } 2667 } 2668 2669 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2670 { 2671 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2672 spin_unlock(&dwc->lock); 2673 dwc->gadget_driver->suspend(&dwc->gadget); 2674 spin_lock(&dwc->lock); 2675 } 2676 } 2677 2678 static void dwc3_resume_gadget(struct dwc3 *dwc) 2679 { 2680 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2681 spin_unlock(&dwc->lock); 2682 dwc->gadget_driver->resume(&dwc->gadget); 2683 spin_lock(&dwc->lock); 2684 } 2685 } 2686 2687 static void dwc3_reset_gadget(struct dwc3 *dwc) 2688 { 2689 if (!dwc->gadget_driver) 2690 return; 2691 2692 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2693 spin_unlock(&dwc->lock); 2694 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2695 spin_lock(&dwc->lock); 2696 } 2697 } 2698 2699 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, 2700 bool interrupt) 2701 { 2702 struct dwc3_gadget_ep_cmd_params params; 2703 u32 cmd; 2704 int ret; 2705 2706 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) || 2707 (dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 2708 return; 2709 2710 /* 2711 * NOTICE: We are violating what the Databook says 
about the
2712 * EndTransfer command. Ideally we would _always_ wait for the
2713 * EndTransfer Command Completion IRQ, but that's causing too
2714 * much trouble synchronizing between us and the gadget driver.
2715 *
2716 * We have discussed this with the IP Provider and it was
2717 * suggested to giveback all requests here.
2718 *
2719 * Note also that a similar handling was tested by Synopsys
2720 * (thanks a lot Paul) and nothing bad has come out of it.
2721 * In short, what we're doing is issuing EndTransfer with the
2722 * CMDIOC bit set and delaying the transfer kick until the
2723 * EndTransfer command has completed.
2724 *
2725 * As of IP version 3.10a of the DWC_usb3 IP, the controller
2726 * supports a mode to work around the above limitation. The
2727 * software can poll the CMDACT bit in the DEPCMD register
2728 * after issuing an EndTransfer command. This mode is enabled
2729 * by writing GUCTL2[14]. This polling is already done in the
2730 * dwc3_send_gadget_ep_cmd() function so if the mode is
2731 * enabled, the EndTransfer command will have completed upon
2732 * returning from this function.
2733 *
2734 * This mode is NOT available on the DWC_usb31 IP.
2735 */
2736
2737 cmd = DWC3_DEPCMD_ENDTRANSFER;
2738 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2739 cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
2740 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2741 memset(&params, 0, sizeof(params));
2742 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2743 WARN_ON_ONCE(ret);
2744 dep->resource_index = 0;
2745
2746 if (!interrupt)
2747 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
2748 else
2749 dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
2750 }
2751
2752 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2753 {
2754 u32 epnum;
2755
2756 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2757 struct dwc3_ep *dep;
2758 int ret;
2759
2760 dep = dwc->eps[epnum];
2761 if (!dep)
2762 continue;
2763
2764 if (!(dep->flags & DWC3_EP_STALL))
2765 continue;
2766
2767 dep->flags &= ~DWC3_EP_STALL;
2768
2769 ret = dwc3_send_clear_stall_ep_cmd(dep);
2770 WARN_ON_ONCE(ret);
2771 }
2772 }
2773
2774 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2775 {
2776 int reg;
2777
2778 dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
2779
2780 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2781 reg &= ~DWC3_DCTL_INITU1ENA;
2782 reg &= ~DWC3_DCTL_INITU2ENA;
2783 dwc3_gadget_dctl_write_safe(dwc, reg);
2784
2785 dwc3_disconnect_gadget(dwc);
2786
2787 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2788 dwc->setup_packet_pending = false;
2789 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2790
2791 dwc->connected = false;
2792 }
2793
2794 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2795 {
2796 u32 reg;
2797
2798 dwc->connected = true;
2799
2800 /*
2801 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2802 * would cause a missing Disconnect Event if there's a
2803 * pending Setup Packet in the FIFO.
2804 *
2805 * There's no suggested workaround on the official Bug
2806 * report, which states that "unless the driver/application
2807 * is doing any special handling of a disconnect event,
2808 * there is no functional issue".
2809 *
2810 * Unfortunately, it turns out that we _do_ some special
2811 * handling of a disconnect event, namely complete all
2812 * pending transfers, notify the gadget driver of the
2813 * disconnection, and so on.
2814 *
2815 * Our suggested workaround is to follow the Disconnect
2816 * Event steps here, instead, based on a setup_packet_pending
2817 * flag.
Such flag gets set whenever we have a SETUP_PENDING 2818 * status for EP0 TRBs and gets cleared on XferComplete for the 2819 * same endpoint. 2820 * 2821 * Refers to: 2822 * 2823 * STAR#9000466709: RTL: Device : Disconnect event not 2824 * generated if setup packet pending in FIFO 2825 */ 2826 if (dwc->revision < DWC3_REVISION_188A) { 2827 if (dwc->setup_packet_pending) 2828 dwc3_gadget_disconnect_interrupt(dwc); 2829 } 2830 2831 dwc3_reset_gadget(dwc); 2832 2833 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2834 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2835 dwc3_gadget_dctl_write_safe(dwc, reg); 2836 dwc->test_mode = false; 2837 dwc3_clear_stall_all_ep(dwc); 2838 2839 /* Reset device address to zero */ 2840 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2841 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2842 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2843 } 2844 2845 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2846 { 2847 struct dwc3_ep *dep; 2848 int ret; 2849 u32 reg; 2850 u8 speed; 2851 2852 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2853 speed = reg & DWC3_DSTS_CONNECTSPD; 2854 dwc->speed = speed; 2855 2856 /* 2857 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2858 * each time on Connect Done. 2859 * 2860 * Currently we always use the reset value. If any platform 2861 * wants to set this to a different value, we need to add a 2862 * setting and update GCTL.RAMCLKSEL here. 2863 */ 2864 2865 switch (speed) { 2866 case DWC3_DSTS_SUPERSPEED_PLUS: 2867 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2868 dwc->gadget.ep0->maxpacket = 512; 2869 dwc->gadget.speed = USB_SPEED_SUPER_PLUS; 2870 break; 2871 case DWC3_DSTS_SUPERSPEED: 2872 /* 2873 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2874 * would cause a missing USB3 Reset event. 2875 * 2876 * In such situations, we should force a USB3 Reset 2877 * event by calling our dwc3_gadget_reset_interrupt() 2878 * routine. 
2879 * 2880 * Refers to: 2881 * 2882 * STAR#9000483510: RTL: SS : USB3 reset event may 2883 * not be generated always when the link enters poll 2884 */ 2885 if (dwc->revision < DWC3_REVISION_190A) 2886 dwc3_gadget_reset_interrupt(dwc); 2887 2888 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2889 dwc->gadget.ep0->maxpacket = 512; 2890 dwc->gadget.speed = USB_SPEED_SUPER; 2891 break; 2892 case DWC3_DSTS_HIGHSPEED: 2893 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2894 dwc->gadget.ep0->maxpacket = 64; 2895 dwc->gadget.speed = USB_SPEED_HIGH; 2896 break; 2897 case DWC3_DSTS_FULLSPEED: 2898 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2899 dwc->gadget.ep0->maxpacket = 64; 2900 dwc->gadget.speed = USB_SPEED_FULL; 2901 break; 2902 case DWC3_DSTS_LOWSPEED: 2903 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2904 dwc->gadget.ep0->maxpacket = 8; 2905 dwc->gadget.speed = USB_SPEED_LOW; 2906 break; 2907 } 2908 2909 dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; 2910 2911 /* Enable USB2 LPM Capability */ 2912 2913 if ((dwc->revision > DWC3_REVISION_194A) && 2914 (speed != DWC3_DSTS_SUPERSPEED) && 2915 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) { 2916 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2917 reg |= DWC3_DCFG_LPM_CAP; 2918 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2919 2920 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2921 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2922 2923 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold | 2924 (dwc->is_utmi_l1_suspend << 4)); 2925 2926 /* 2927 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2928 * DCFG.LPMCap is set, core responses with an ACK and the 2929 * BESL value in the LPM token is less than or equal to LPM 2930 * NYET threshold. 2931 */ 2932 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2933 && dwc->has_lpm_erratum, 2934 "LPM Erratum not available on dwc3 revisions < 2.40a\n"); 2935 2936 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2937 reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold); 2938 2939 dwc3_gadget_dctl_write_safe(dwc, reg); 2940 } else { 2941 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2942 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2943 dwc3_gadget_dctl_write_safe(dwc, reg); 2944 } 2945 2946 dep = dwc->eps[0]; 2947 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY); 2948 if (ret) { 2949 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2950 return; 2951 } 2952 2953 dep = dwc->eps[1]; 2954 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY); 2955 if (ret) { 2956 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2957 return; 2958 } 2959 2960 /* 2961 * Configure PHY via GUSB3PIPECTLn if required. 2962 * 2963 * Update GTXFIFOSIZn 2964 * 2965 * In both cases reset values should be sufficient. 2966 */ 2967 } 2968 2969 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2970 { 2971 /* 2972 * TODO take core out of low power mode when that's 2973 * implemented. 2974 */ 2975 2976 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2977 spin_unlock(&dwc->lock); 2978 dwc->gadget_driver->resume(&dwc->gadget); 2979 spin_lock(&dwc->lock); 2980 } 2981 } 2982 2983 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2984 unsigned int evtinfo) 2985 { 2986 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2987 unsigned int pwropt; 2988 2989 /* 2990 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2991 * Hibernation mode enabled which would show up when device detects 2992 * host-initiated U3 exit. 
2993 * 2994 * In that case, device will generate a Link State Change Interrupt 2995 * from U3 to RESUME which is only necessary if Hibernation is 2996 * configured in. 2997 * 2998 * There are no functional changes due to such spurious event and we 2999 * just need to ignore it. 3000 * 3001 * Refers to: 3002 * 3003 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation 3004 * operational mode 3005 */ 3006 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 3007 if ((dwc->revision < DWC3_REVISION_250A) && 3008 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { 3009 if ((dwc->link_state == DWC3_LINK_STATE_U3) && 3010 (next == DWC3_LINK_STATE_RESUME)) { 3011 return; 3012 } 3013 } 3014 3015 /* 3016 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 3017 * on the link partner, the USB session might do multiple entry/exit 3018 * of low power states before a transfer takes place. 3019 * 3020 * Due to this problem, we might experience lower throughput. The 3021 * suggested workaround is to disable DCTL[12:9] bits if we're 3022 * transitioning from U1/U2 to U0 and enable those bits again 3023 * after a transfer completes and there are no pending transfers 3024 * on any of the enabled endpoints. 3025 * 3026 * This is the first half of that workaround. 3027 * 3028 * Refers to: 3029 * 3030 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 3031 * core send LGO_Ux entering U0 3032 */ 3033 if (dwc->revision < DWC3_REVISION_183A) { 3034 if (next == DWC3_LINK_STATE_U0) { 3035 u32 u1u2; 3036 u32 reg; 3037 3038 switch (dwc->link_state) { 3039 case DWC3_LINK_STATE_U1: 3040 case DWC3_LINK_STATE_U2: 3041 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 3042 u1u2 = reg & (DWC3_DCTL_INITU2ENA 3043 | DWC3_DCTL_ACCEPTU2ENA 3044 | DWC3_DCTL_INITU1ENA 3045 | DWC3_DCTL_ACCEPTU1ENA); 3046 3047 if (!dwc->u1u2) 3048 dwc->u1u2 = reg & u1u2; 3049 3050 reg &= ~u1u2; 3051 3052 dwc3_gadget_dctl_write_safe(dwc, reg); 3053 break; 3054 default: 3055 /* do nothing */ 3056 break; 3057 } 3058 } 3059 } 3060 3061 switch (next) { 3062 case DWC3_LINK_STATE_U1: 3063 if (dwc->speed == USB_SPEED_SUPER) 3064 dwc3_suspend_gadget(dwc); 3065 break; 3066 case DWC3_LINK_STATE_U2: 3067 case DWC3_LINK_STATE_U3: 3068 dwc3_suspend_gadget(dwc); 3069 break; 3070 case DWC3_LINK_STATE_RESUME: 3071 dwc3_resume_gadget(dwc); 3072 break; 3073 default: 3074 /* do nothing */ 3075 break; 3076 } 3077 3078 dwc->link_state = next; 3079 } 3080 3081 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, 3082 unsigned int evtinfo) 3083 { 3084 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 3085 3086 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) 3087 dwc3_suspend_gadget(dwc); 3088 3089 dwc->link_state = next; 3090 } 3091 3092 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 3093 unsigned int evtinfo) 3094 { 3095 unsigned int is_ss = evtinfo & BIT(4); 3096 3097 /* 3098 * WORKAROUND: DWC3 revison 2.20a with hibernation support 3099 * have a known issue which can cause USB CV TD.9.23 to fail 3100 * randomly. 3101 * 3102 * Because of this issue, core could generate bogus hibernation 3103 * events which SW needs to ignore. 
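 *
 * For example, a hibernation event whose BIT(4) claims SuperSpeed
 * while the device is actually connected at high speed is considered
 * bogus and dropped by the is_ss check below.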
3104 * 3105 * Refers to: 3106 * 3107 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 3108 * Device Fallback from SuperSpeed 3109 */ 3110 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 3111 return; 3112 3113 /* enter hibernation here */ 3114 } 3115 3116 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 3117 const struct dwc3_event_devt *event) 3118 { 3119 switch (event->type) { 3120 case DWC3_DEVICE_EVENT_DISCONNECT: 3121 dwc3_gadget_disconnect_interrupt(dwc); 3122 break; 3123 case DWC3_DEVICE_EVENT_RESET: 3124 dwc3_gadget_reset_interrupt(dwc); 3125 break; 3126 case DWC3_DEVICE_EVENT_CONNECT_DONE: 3127 dwc3_gadget_conndone_interrupt(dwc); 3128 break; 3129 case DWC3_DEVICE_EVENT_WAKEUP: 3130 dwc3_gadget_wakeup_interrupt(dwc); 3131 break; 3132 case DWC3_DEVICE_EVENT_HIBER_REQ: 3133 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 3134 "unexpected hibernation event\n")) 3135 break; 3136 3137 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 3138 break; 3139 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 3140 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 3141 break; 3142 case DWC3_DEVICE_EVENT_EOPF: 3143 /* It changed to be suspend event for version 2.30a and above */ 3144 if (dwc->revision >= DWC3_REVISION_230A) { 3145 /* 3146 * Ignore suspend event until the gadget enters into 3147 * USB_STATE_CONFIGURED state. 3148 */ 3149 if (dwc->gadget.state >= USB_STATE_CONFIGURED) 3150 dwc3_gadget_suspend_interrupt(dwc, 3151 event->event_info); 3152 } 3153 break; 3154 case DWC3_DEVICE_EVENT_SOF: 3155 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 3156 case DWC3_DEVICE_EVENT_CMD_CMPL: 3157 case DWC3_DEVICE_EVENT_OVERFLOW: 3158 break; 3159 default: 3160 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 3161 } 3162 } 3163 3164 static void dwc3_process_event_entry(struct dwc3 *dwc, 3165 const union dwc3_event *event) 3166 { 3167 trace_dwc3_event(event->raw, dwc); 3168 3169 if (!event->type.is_devspec) 3170 dwc3_endpoint_interrupt(dwc, &event->depevt); 3171 else if (event->type.type == DWC3_EVENT_TYPE_DEV) 3172 dwc3_gadget_interrupt(dwc, &event->devt); 3173 else 3174 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 3175 } 3176 3177 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) 3178 { 3179 struct dwc3 *dwc = evt->dwc; 3180 irqreturn_t ret = IRQ_NONE; 3181 int left; 3182 u32 reg; 3183 3184 left = evt->count; 3185 3186 if (!(evt->flags & DWC3_EVENT_PENDING)) 3187 return IRQ_NONE; 3188 3189 while (left > 0) { 3190 union dwc3_event event; 3191 3192 event.raw = *(u32 *) (evt->cache + evt->lpos); 3193 3194 dwc3_process_event_entry(dwc, &event); 3195 3196 /* 3197 * FIXME we wrap around correctly to the next entry as 3198 * almost all entries are 4 bytes in size. There is one 3199 * entry which has 12 bytes which is a regular entry 3200 * followed by 8 bytes data. ATM I don't know how 3201 * things are organized if we get next to the a 3202 * boundary so I worry about that once we try to handle 3203 * that. 
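 *
 * The advance below is plain modular arithmetic; e.g. assuming an
 * event buffer length of 4096 bytes, an lpos of 4092 moves to
 * (4092 + 4) % 4096 == 0, i.e. back to the start of the cached buffer.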
3204 */ 3205 evt->lpos = (evt->lpos + 4) % evt->length; 3206 left -= 4; 3207 } 3208 3209 evt->count = 0; 3210 evt->flags &= ~DWC3_EVENT_PENDING; 3211 ret = IRQ_HANDLED; 3212 3213 /* Unmask interrupt */ 3214 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3215 reg &= ~DWC3_GEVNTSIZ_INTMASK; 3216 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3217 3218 if (dwc->imod_interval) { 3219 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 3220 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 3221 } 3222 3223 return ret; 3224 } 3225 3226 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) 3227 { 3228 struct dwc3_event_buffer *evt = _evt; 3229 struct dwc3 *dwc = evt->dwc; 3230 unsigned long flags; 3231 irqreturn_t ret = IRQ_NONE; 3232 3233 spin_lock_irqsave(&dwc->lock, flags); 3234 ret = dwc3_process_event_buf(evt); 3235 spin_unlock_irqrestore(&dwc->lock, flags); 3236 3237 return ret; 3238 } 3239 3240 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) 3241 { 3242 struct dwc3 *dwc = evt->dwc; 3243 u32 amount; 3244 u32 count; 3245 u32 reg; 3246 3247 if (pm_runtime_suspended(dwc->dev)) { 3248 pm_runtime_get(dwc->dev); 3249 disable_irq_nosync(dwc->irq_gadget); 3250 dwc->pending_events = true; 3251 return IRQ_HANDLED; 3252 } 3253 3254 /* 3255 * With PCIe legacy interrupt, test shows that top-half irq handler can 3256 * be called again after HW interrupt deassertion. Check if bottom-half 3257 * irq event handler completes before caching new event to prevent 3258 * losing events. 3259 */ 3260 if (evt->flags & DWC3_EVENT_PENDING) 3261 return IRQ_HANDLED; 3262 3263 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3264 count &= DWC3_GEVNTCOUNT_MASK; 3265 if (!count) 3266 return IRQ_NONE; 3267 3268 evt->count = count; 3269 evt->flags |= DWC3_EVENT_PENDING; 3270 3271 /* Mask interrupt */ 3272 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3273 reg |= DWC3_GEVNTSIZ_INTMASK; 3274 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3275 3276 amount = min(count, evt->length - evt->lpos); 3277 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); 3278 3279 if (amount < count) 3280 memcpy(evt->cache, evt->buf, count - amount); 3281 3282 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); 3283 3284 return IRQ_WAKE_THREAD; 3285 } 3286 3287 static irqreturn_t dwc3_interrupt(int irq, void *_evt) 3288 { 3289 struct dwc3_event_buffer *evt = _evt; 3290 3291 return dwc3_check_event_buf(evt); 3292 } 3293 3294 static int dwc3_gadget_get_irq(struct dwc3 *dwc) 3295 { 3296 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 3297 int irq; 3298 3299 irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral"); 3300 if (irq > 0) 3301 goto out; 3302 3303 if (irq == -EPROBE_DEFER) 3304 goto out; 3305 3306 irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); 3307 if (irq > 0) 3308 goto out; 3309 3310 if (irq == -EPROBE_DEFER) 3311 goto out; 3312 3313 irq = platform_get_irq(dwc3_pdev, 0); 3314 if (irq > 0) 3315 goto out; 3316 3317 if (!irq) 3318 irq = -EINVAL; 3319 3320 out: 3321 return irq; 3322 } 3323 3324 /** 3325 * dwc3_gadget_init - initializes gadget related registers 3326 * @dwc: pointer to our controller context structure 3327 * 3328 * Returns 0 on success otherwise negative errno. 
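 *
 * Resources are allocated in order (ep0 TRBs, setup buffer, bounce
 * buffer, endpoints, UDC registration) and the err4..err0 labels
 * unwind them in reverse order on failure.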
3329 */ 3330 int dwc3_gadget_init(struct dwc3 *dwc) 3331 { 3332 int ret; 3333 int irq; 3334 3335 irq = dwc3_gadget_get_irq(dwc); 3336 if (irq < 0) { 3337 ret = irq; 3338 goto err0; 3339 } 3340 3341 dwc->irq_gadget = irq; 3342 3343 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 3344 sizeof(*dwc->ep0_trb) * 2, 3345 &dwc->ep0_trb_addr, GFP_KERNEL); 3346 if (!dwc->ep0_trb) { 3347 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 3348 ret = -ENOMEM; 3349 goto err0; 3350 } 3351 3352 dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL); 3353 if (!dwc->setup_buf) { 3354 ret = -ENOMEM; 3355 goto err1; 3356 } 3357 3358 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3359 &dwc->bounce_addr, GFP_KERNEL); 3360 if (!dwc->bounce) { 3361 ret = -ENOMEM; 3362 goto err2; 3363 } 3364 3365 init_completion(&dwc->ep0_in_setup); 3366 3367 dwc->gadget.ops = &dwc3_gadget_ops; 3368 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3369 dwc->gadget.sg_supported = true; 3370 dwc->gadget.name = "dwc3-gadget"; 3371 dwc->gadget.lpm_capable = true; 3372 3373 /* 3374 * FIXME We might be setting max_speed to <SUPER, however versions 3375 * <2.20a of dwc3 have an issue with metastability (documented 3376 * elsewhere in this driver) which tells us we can't set max speed to 3377 * anything lower than SUPER. 3378 * 3379 * Because gadget.max_speed is only used by composite.c and function 3380 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3381 * to happen so we avoid sending SuperSpeed Capability descriptor 3382 * together with our BOS descriptor as that could confuse host into 3383 * thinking we can handle super speed. 3384 * 3385 * Note that, in fact, we won't even support GetBOS requests when speed 3386 * is less than super speed because we don't have means, yet, to tell 3387 * composite.c that we are USB 2.0 + LPM ECN. 3388 */ 3389 if (dwc->revision < DWC3_REVISION_220A && 3390 !dwc->dis_metastability_quirk) 3391 dev_info(dwc->dev, "changing max_speed on rev %08x\n", 3392 dwc->revision); 3393 3394 dwc->gadget.max_speed = dwc->maximum_speed; 3395 3396 /* 3397 * REVISIT: Here we should clear all pending IRQs to be 3398 * sure we're starting from a well known location. 
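 *
 * One possible (untested) way to do that would be to read
 * DWC3_GEVNTCOUNT(0) and write the value straight back, acknowledging
 * any stale events much like dwc3_check_event_buf() does after
 * copying events into its cache.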
3399 */ 3400 3401 ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps); 3402 if (ret) 3403 goto err3; 3404 3405 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3406 if (ret) { 3407 dev_err(dwc->dev, "failed to register udc\n"); 3408 goto err4; 3409 } 3410 3411 dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed); 3412 3413 return 0; 3414 3415 err4: 3416 dwc3_gadget_free_endpoints(dwc); 3417 3418 err3: 3419 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3420 dwc->bounce_addr); 3421 3422 err2: 3423 kfree(dwc->setup_buf); 3424 3425 err1: 3426 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3427 dwc->ep0_trb, dwc->ep0_trb_addr); 3428 3429 err0: 3430 return ret; 3431 } 3432 3433 /* -------------------------------------------------------------------------- */ 3434 3435 void dwc3_gadget_exit(struct dwc3 *dwc) 3436 { 3437 usb_del_gadget_udc(&dwc->gadget); 3438 dwc3_gadget_free_endpoints(dwc); 3439 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3440 dwc->bounce_addr); 3441 kfree(dwc->setup_buf); 3442 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3443 dwc->ep0_trb, dwc->ep0_trb_addr); 3444 } 3445 3446 int dwc3_gadget_suspend(struct dwc3 *dwc) 3447 { 3448 if (!dwc->gadget_driver) 3449 return 0; 3450 3451 dwc3_gadget_run_stop(dwc, false, false); 3452 dwc3_disconnect_gadget(dwc); 3453 __dwc3_gadget_stop(dwc); 3454 3455 return 0; 3456 } 3457 3458 int dwc3_gadget_resume(struct dwc3 *dwc) 3459 { 3460 int ret; 3461 3462 if (!dwc->gadget_driver) 3463 return 0; 3464 3465 ret = __dwc3_gadget_start(dwc); 3466 if (ret < 0) 3467 goto err0; 3468 3469 ret = dwc3_gadget_run_stop(dwc, true, false); 3470 if (ret < 0) 3471 goto err1; 3472 3473 return 0; 3474 3475 err1: 3476 __dwc3_gadget_stop(dwc); 3477 3478 err0: 3479 return ret; 3480 } 3481 3482 void dwc3_gadget_process_pending_events(struct dwc3 *dwc) 3483 { 3484 if (dwc->pending_events) { 3485 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); 3486 dwc->pending_events = false; 3487 enable_irq(dwc->irq_gadget); 3488 } 3489 } 3490