// SPDX-License-Identifier: GPL-2.0
/*
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/*
 * Round the current frame number, advanced by @n intervals, down to the
 * endpoint's interval boundary (relies on dep->interval being a power of two).
 */
#define DWC3_ALIGN_FRAME(d, n)	(((d)->frame_number + ((d)->interval * (n))) \
					& ~((d)->interval - 1))

/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		/* TstCtl field sits one bit up from bit 0 in DCTL */
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - gets current state of usb link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. Returns the link state decoded
 * from DSTS; a plain register read, so it cannot fail or time out.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		/* loop ran out with DCNRD still set */
		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	/* slot DWC3_TRB_NUM - 1 holds the link TRB; wrap back to slot 0 */
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

/*
 * Detach @req from its list, unmap its DMA buffer and finalize its status.
 * Does NOT call the request's ->complete() — see dwc3_gadget_giveback().
 */
static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
		struct dwc3_request *req, int status)
{
	struct dwc3 *dwc = dep->dwc;

	req->started = false;
	list_del(&req->list);
	req->remaining = 0;
	req->needs_extra_trb = false;

	/* don't overwrite a status that was already set */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
				&req->request, req->direction);

	req->trb = NULL;
	trace_dwc3_gadget_giveback(req);

	/* non-ep0 endpoints hold a PM runtime reference per request */
	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	dwc3_gadget_del_and_unmap_request(dep, req, status);

	/* drop the lock: ->complete() may re-enter the driver to queue more */
	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion. Returns 0 on success, -EINVAL on a non-zero
 * command status, or -ETIMEDOUT if CmdAct never clears.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32 timeout = 500;
	int status = 0;
	int ret = 0;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	/* poll until the controller clears CmdAct */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	u32 timeout = 1000;
	u32 saved_config = 0;
	u32 reg;

	int cmd_status = 0;
	int ret = -EINVAL;

	/*
	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
	 * endpoint command.
	 *
	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
	 * settings. Restore them after the command is completed.
	 *
	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
	 */
	if (dwc->gadget.speed <= USB_SPEED_HIGH) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		}

		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		}

		if (saved_config)
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int needs_wakeup;

		/* the link must be awake before a transfer can be started */
		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
				dwc->link_state == DWC3_LINK_STATE_U2 ||
				dwc->link_state == DWC3_LINK_STATE_U3);

		if (unlikely(needs_wakeup)) {
			ret = __dwc3_gadget_wakeup(dwc);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
		}
	}

	dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);

	/*
	 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
	    !usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	/* track transfer-started state on successful start/end commands */
	if (ret == 0) {
		switch (DWC3_DEPCMD_CMD(cmd)) {
		case DWC3_DEPCMD_STARTTRANSFER:
			dep->flags |= DWC3_EP_TRANSFER_STARTED;
			dwc3_gadget_ep_get_transfer_index(dep);
			break;
		case DWC3_DEPCMD_ENDTRANSFER:
			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
			break;
		default:
			/* nothing */
			break;
		}
	}

	/* restore the PHY config bits cleared above */
	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}

static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/*
	 * As of core revision 2.60a the recommended programming model
	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
	 * command for IN endpoints. This is to prevent an issue where
	 * some (non-compliant) hosts may not send ACK TPs for pending
	 * IN transfers due to a mishandled error condition. Synopsys
	 * STAR 9000614252.
424 */ 425 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) && 426 (dwc->gadget.speed >= USB_SPEED_SUPER)) 427 cmd |= DWC3_DEPCMD_CLEARPENDIN; 428 429 memset(¶ms, 0, sizeof(params)); 430 431 return dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 432 } 433 434 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 435 struct dwc3_trb *trb) 436 { 437 u32 offset = (char *) trb - (char *) dep->trb_pool; 438 439 return dep->trb_pool_dma + offset; 440 } 441 442 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 443 { 444 struct dwc3 *dwc = dep->dwc; 445 446 if (dep->trb_pool) 447 return 0; 448 449 dep->trb_pool = dma_alloc_coherent(dwc->sysdev, 450 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 451 &dep->trb_pool_dma, GFP_KERNEL); 452 if (!dep->trb_pool) { 453 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 454 dep->name); 455 return -ENOMEM; 456 } 457 458 return 0; 459 } 460 461 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 462 { 463 struct dwc3 *dwc = dep->dwc; 464 465 dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 466 dep->trb_pool, dep->trb_pool_dma); 467 468 dep->trb_pool = NULL; 469 dep->trb_pool_dma = 0; 470 } 471 472 static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) 473 { 474 struct dwc3_gadget_ep_cmd_params params; 475 476 memset(¶ms, 0x00, sizeof(params)); 477 478 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 479 480 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, 481 ¶ms); 482 } 483 484 /** 485 * dwc3_gadget_start_config - configure ep resources 486 * @dep: endpoint that is being enabled 487 * 488 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's 489 * completion, it will set Transfer Resource for all available endpoints. 490 * 491 * The assignment of transfer resources cannot perfectly follow the data book 492 * due to the fact that the controller driver does not have all knowledge of the 493 * configuration in advance. 
It is given this information piecemeal by the 494 * composite gadget framework after every SET_CONFIGURATION and 495 * SET_INTERFACE. Trying to follow the databook programming model in this 496 * scenario can cause errors. For two reasons: 497 * 498 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every 499 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is 500 * incorrect in the scenario of multiple interfaces. 501 * 502 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new 503 * endpoint on alt setting (8.1.6). 504 * 505 * The following simplified method is used instead: 506 * 507 * All hardware endpoints can be assigned a transfer resource and this setting 508 * will stay persistent until either a core reset or hibernation. So whenever we 509 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do 510 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are 511 * guaranteed that there are as many transfer resources as endpoints. 512 * 513 * This function is called for each endpoint when it is being enabled but is 514 * triggered only when called for EP0-out, which always happens first, and which 515 * should only happen in one of the above conditions. 
516 */ 517 static int dwc3_gadget_start_config(struct dwc3_ep *dep) 518 { 519 struct dwc3_gadget_ep_cmd_params params; 520 struct dwc3 *dwc; 521 u32 cmd; 522 int i; 523 int ret; 524 525 if (dep->number) 526 return 0; 527 528 memset(¶ms, 0x00, sizeof(params)); 529 cmd = DWC3_DEPCMD_DEPSTARTCFG; 530 dwc = dep->dwc; 531 532 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 533 if (ret) 534 return ret; 535 536 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 537 struct dwc3_ep *dep = dwc->eps[i]; 538 539 if (!dep) 540 continue; 541 542 ret = dwc3_gadget_set_xfer_resource(dep); 543 if (ret) 544 return ret; 545 } 546 547 return 0; 548 } 549 550 static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) 551 { 552 const struct usb_ss_ep_comp_descriptor *comp_desc; 553 const struct usb_endpoint_descriptor *desc; 554 struct dwc3_gadget_ep_cmd_params params; 555 struct dwc3 *dwc = dep->dwc; 556 557 comp_desc = dep->endpoint.comp_desc; 558 desc = dep->endpoint.desc; 559 560 memset(¶ms, 0x00, sizeof(params)); 561 562 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 563 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 564 565 /* Burst size is only needed in SuperSpeed mode */ 566 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 567 u32 burst = dep->endpoint.maxburst; 568 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); 569 } 570 571 params.param0 |= action; 572 if (action == DWC3_DEPCFG_ACTION_RESTORE) 573 params.param2 |= dep->saved_state; 574 575 if (usb_endpoint_xfer_control(desc)) 576 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN; 577 578 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc)) 579 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN; 580 581 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 582 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 583 | DWC3_DEPCFG_STREAM_EVENT_EN; 584 dep->stream_capable = true; 585 } 586 587 if (!usb_endpoint_xfer_control(desc)) 588 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 589 590 /* 591 
* We are doing 1:1 mapping for endpoints, meaning 592 * Physical Endpoints 2 maps to Logical Endpoint 2 and 593 * so on. We consider the direction bit as part of the physical 594 * endpoint number. So USB endpoint 0x81 is 0x03. 595 */ 596 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 597 598 /* 599 * We must use the lower 16 TX FIFOs even though 600 * HW might have more 601 */ 602 if (dep->direction) 603 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 604 605 if (desc->bInterval) { 606 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 607 dep->interval = 1 << (desc->bInterval - 1); 608 } 609 610 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); 611 } 612 613 /** 614 * __dwc3_gadget_ep_enable - initializes a hw endpoint 615 * @dep: endpoint to be initialized 616 * @action: one of INIT, MODIFY or RESTORE 617 * 618 * Caller should take care of locking. Execute all necessary commands to 619 * initialize a HW endpoint so it can be used by a gadget driver. 
620 */ 621 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) 622 { 623 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 624 struct dwc3 *dwc = dep->dwc; 625 626 u32 reg; 627 int ret; 628 629 if (!(dep->flags & DWC3_EP_ENABLED)) { 630 ret = dwc3_gadget_start_config(dep); 631 if (ret) 632 return ret; 633 } 634 635 ret = dwc3_gadget_set_ep_config(dep, action); 636 if (ret) 637 return ret; 638 639 if (!(dep->flags & DWC3_EP_ENABLED)) { 640 struct dwc3_trb *trb_st_hw; 641 struct dwc3_trb *trb_link; 642 643 dep->type = usb_endpoint_type(desc); 644 dep->flags |= DWC3_EP_ENABLED; 645 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 646 647 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 648 reg |= DWC3_DALEPENA_EP(dep->number); 649 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 650 651 if (usb_endpoint_xfer_control(desc)) 652 goto out; 653 654 /* Initialize the TRB ring */ 655 dep->trb_dequeue = 0; 656 dep->trb_enqueue = 0; 657 memset(dep->trb_pool, 0, 658 sizeof(struct dwc3_trb) * DWC3_TRB_NUM); 659 660 /* Link TRB. The HWO bit is never reset */ 661 trb_st_hw = &dep->trb_pool[0]; 662 663 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 664 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 665 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 666 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 667 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 668 } 669 670 /* 671 * Issue StartTransfer here with no-op TRB so we can always rely on No 672 * Response Update Transfer command. 
673 */ 674 if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) || 675 usb_endpoint_xfer_int(desc)) { 676 struct dwc3_gadget_ep_cmd_params params; 677 struct dwc3_trb *trb; 678 dma_addr_t trb_dma; 679 u32 cmd; 680 681 memset(¶ms, 0, sizeof(params)); 682 trb = &dep->trb_pool[0]; 683 trb_dma = dwc3_trb_dma_offset(dep, trb); 684 685 params.param0 = upper_32_bits(trb_dma); 686 params.param1 = lower_32_bits(trb_dma); 687 688 cmd = DWC3_DEPCMD_STARTTRANSFER; 689 690 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 691 if (ret < 0) 692 return ret; 693 } 694 695 out: 696 trace_dwc3_gadget_ep_enable(dep); 697 698 return 0; 699 } 700 701 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force); 702 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 703 { 704 struct dwc3_request *req; 705 706 dwc3_stop_active_transfer(dep, true); 707 708 /* - giveback all requests to gadget driver */ 709 while (!list_empty(&dep->started_list)) { 710 req = next_request(&dep->started_list); 711 712 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 713 } 714 715 while (!list_empty(&dep->pending_list)) { 716 req = next_request(&dep->pending_list); 717 718 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 719 } 720 } 721 722 /** 723 * __dwc3_gadget_ep_disable - disables a hw endpoint 724 * @dep: the endpoint to disable 725 * 726 * This function undoes what __dwc3_gadget_ep_enable did and also removes 727 * requests which are currently being processed by the hardware and those which 728 * are not yet scheduled. 729 * 730 * Caller should take care of locking. 
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	trace_dwc3_gadget_ep_disable(dep);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	/* remove the endpoint from the Device Active Endpoint register */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->type = 0;
	/* clear all flags except a possibly still-pending End Transfer */
	dep->flags &= DWC3_EP_END_TRANSFER_PENDING;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/* ep0 is managed internally by the driver; gadget drivers may not enable it */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

/* usb_ep_ops.enable entry point: validate arguments, then lock and enable */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double-enable is a caller bug; warn but report success */
	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops.disable entry point: validate argument, then lock and disable */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double-disable is a caller bug; warn but report success */
	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops.alloc_request: allocate and pre-fill a dwc3_request */
static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->direction = dep->direction;
	req->epnum = dep->number;
	req->dep = dep;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

/* usb_ep_ops.free_request counterpart of dwc3_gadget_ep_alloc_request() */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_ep_prev_trb - returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[tmp - 1];
}

/* Number of TRB slots still available for software to fill on @dep's ring. */
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	struct dwc3_trb *tmp;
	u8 trbs_left;

	/*
	 * If enqueue & dequeue are equal then it is either full or empty.
	 *
	 * One way to know for sure is if the TRB right before us has HWO bit
	 * set or not. If it has, then we're definitely full and can't fit any
	 * more transfers in our ring.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}

/*
 * Fill in one TRB at @trb for @dep and hand it to the hardware (sets HWO)
 * after advancing the enqueue pointer. Per-transfer flags (@chain, @node,
 * @stream_id, @short_not_ok, @no_interrupt) come from the owning request.
 */
static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
		dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
		unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
{
	struct dwc3 *dwc = dep->dwc;
	struct usb_gadget *gadget = &dwc->gadget;
	enum usb_device_speed speed = gadget->speed;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used. If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet. If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = 2;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (length <= (2 * maxp))
					mult--;

				if (length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/*
	 * Enable Continue on Short Packet
	 * when endpoint is not a stream capable
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		if (!dep->stream_capable)
			trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	/* interrupt on completion for unchained TRBs, or when the ring fills */
	if ((!no_interrupt && !chain) ||
			(dwc3_calc_trbs_left(dep) == 1))
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	/* hand ownership to the controller; must be the last ctrl update */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);

	trace_dwc3_prepare_trb(dep, trb);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @chain: should this TRB be chained to
 *	   the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned chain, unsigned node)
{
	struct dwc3_trb *trb;
	unsigned int length;
	dma_addr_t dma;
	unsigned stream_id = req->request.stream_id;
	unsigned short_not_ok = req->request.short_not_ok;
	unsigned no_interrupt = req->request.no_interrupt;

	/* scatter-gather requests take the current sg entry, linear the whole buffer */
	if (req->request.num_sgs > 0) {
		length = sg_dma_len(req->start_sg);
		dma = sg_dma_address(req->start_sg);
	} else {
		length = req->request.length;
		dma = req->request.dma;
	}

	trb = &dep->trb_pool[dep->trb_enqueue];

	/* first TRB of this request: move it to the started list */
	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	req->num_trbs++;

	__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
			stream_id, short_not_ok, no_interrupt);
}

/*
 * Prepare TRBs for a scatter-gather request, resuming from req->start_sg.
 * For an unaligned OUT request, the last sg entry gets an extra bounce-buffer
 * TRB so the total transfer size is maxpacket-aligned.
 */
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int i;

	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;

	for_each_sg(sg, s, remaining, i) {
		unsigned int length = req->request.length;
		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
		unsigned int rem = length % maxp;
		unsigned chain = true;

		if (sg_is_last(s))
			chain = false;

		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
			struct dwc3 *dwc = dep->dwc;
			struct dwc3_trb *trb;

			req->needs_extra_trb = true;

			/* prepare normal TRB */
			dwc3_prepare_one_trb(dep, req, true, i);

			/* Now prepare one extra TRB to align transfer size */
			trb = &dep->trb_pool[dep->trb_enqueue];
			req->num_trbs++;
			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
					maxp - rem, false, 1,
					req->request.stream_id,
					req->request.short_not_ok,
					req->request.no_interrupt);
		} else {
			dwc3_prepare_one_trb(dep, req, chain, i);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to next sg to be queued, so that
		 * we have free trbs we can continue queuing from where we
		 * previously stopped
		 */
		if (chain)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;

		if (!dwc3_calc_trbs_left(dep))
			break;
	}
}

/*
 * Prepare TRB(s) for a non-scatter-gather request. An unaligned (or
 * zero-length) OUT request gets an extra bounce-buffer TRB for alignment;
 * an aligned request with the zero flag gets an extra ZLP TRB.
 */
static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int length = req->request.length;
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = length % maxp;

	if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
		struct dwc3 *dwc = dep->dwc;
		struct dwc3_trb *trb;

		req->needs_extra_trb = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to align transfer size */
		trb = &dep->trb_pool[dep->trb_enqueue];
		req->num_trbs++;
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
				false, 1, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else if (req->request.zero && req->request.length &&
		   (IS_ALIGNED(req->request.length, maxp))) {
		struct dwc3 *dwc = dep->dwc;
		struct dwc3_trb *trb;

		req->needs_extra_trb = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to handle ZLP */
		trb = &dep->trb_pool[dep->trb_enqueue];
		req->num_trbs++;
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
				false, 1, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else {
		dwc3_prepare_one_trb(dep, req, false, 0);
	}
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3 *dwc = dep->dwc;
		int ret;

		/* Map the request for DMA before putting it on the ring. */
		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
						    dep->direction);
		if (ret)
			return;

		req->sg			= req->request.sg;
		req->start_sg		= req->sg;
		req->num_queued_sgs	= 0;
		req->num_pending_sgs	= req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

/*
 * __dwc3_gadget_kick_transfer - hand prepared TRBs to the controller
 * @dep: endpoint to (re)start
 *
 * Prepares TRBs from the endpoint's request lists and issues either
 * START TRANSFER (no transfer in flight yet) or UPDATE TRANSFER (a
 * transfer is already running). Caller should take care of locking.
 * Returns 0 on success or the negative error code from the endpoint
 * command.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	int				starting;
	int				ret;
	u32				cmd;

	if (!dwc3_calc_trbs_left(dep))
		return 0;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		/* Nothing made it onto the ring; retry on the next event. */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		/* START TRANSFER takes the DMA address of the first TRB. */
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		/* Isochronous transfers start at a specific (micro)frame. */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		if (req->trb)
			memset(req->trb, 0, sizeof(struct dwc3_trb));
		dwc3_gadget_del_and_unmap_request(dep, req, ret);
		return ret;
	}

	return 0;
}

/* Read the current (micro)frame number from DSTS. */
static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

/**
 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
 * @dep: isoc endpoint
 *
 * This function tests for the correct combination of BIT[15:14] from the 16-bit
 * microframe number reported by the XferNotReady event for the future frame
 * number to start the isoc transfer.
 *
 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
 * XferNotReady event are invalid. The driver uses this number to schedule the
 * isochronous transfer and passes it to the START TRANSFER command. Because
 * this number is invalid, the command may fail. If BIT[15:14] matches the
 * internal 16-bit microframe, the START TRANSFER command will pass and the
 * transfer will start at the scheduled time, if it is off by 1, the command
 * will still pass, but the transfer will start 2 seconds in the future. For all
 * other conditions, the START TRANSFER command will fail with bus-expiry.
 *
 * In order to workaround this issue, we can test for the correct combination of
 * BIT[15:14] by sending START TRANSFER commands with different values of
 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart
 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
 * As the result, within the 4 possible combinations for BIT[15:14], there will
 * be 2 successful and 2 failure START COMMAND status.
 * One of the 2 successful
 * command status will result in a 2-second delay start. The smaller BIT[15:14]
 * value is the correct combination.
 *
 * Since there are only 4 outcomes and the results are ordered, we can simply
 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
 * deduce the smaller successful combination.
 *
 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
 * of BIT[15:14]. The correct combination is as follow:
 *
 * if test0 fails and test1 passes, BIT[15:14] is 'b01
 * if test0 fails and test1 fails, BIT[15:14] is 'b10
 * if test0 passes and test1 fails, BIT[15:14] is 'b11
 * if test0 passes and test1 passes, BIT[15:14] is 'b00
 *
 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
 * endpoints.
 */
static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
{
	int cmd_status = 0;
	bool test0;
	bool test1;

	/* dep->combo_num tracks which BIT[15:14] combination we're testing. */
	while (dep->combo_num < 2) {
		struct dwc3_gadget_ep_cmd_params params;
		u32 test_frame_number;
		u32 cmd;

		/*
		 * Check if we can start isoc transfer on the next interval or
		 * 4 uframes in the future with BIT[15:14] as dep->combo_num
		 */
		test_frame_number = dep->frame_number & 0x3fff;
		test_frame_number |= dep->combo_num << 14;
		test_frame_number += max_t(u32, 4, dep->interval);

		params.param0 = upper_32_bits(dep->dwc->bounce_addr);
		params.param1 = lower_32_bits(dep->dwc->bounce_addr);

		cmd = DWC3_DEPCMD_STARTTRANSFER;
		cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
		cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

		/* Redo if some other failure beside bus-expiry is received */
		if (cmd_status && cmd_status != -EAGAIN) {
			dep->start_cmd_status = 0;
			dep->combo_num = 0;
			return 0;
		}

		/* Store the first test status */
		if (dep->combo_num == 0)
			dep->start_cmd_status = cmd_status;

		dep->combo_num++;

		/*
		 * End the transfer if the START_TRANSFER command is successful
		 * to wait for the next XferNotReady to test the command again
		 */
		if (cmd_status == 0) {
			dwc3_stop_active_transfer(dep, true);
			return 0;
		}
	}

	/* test0 and test1 are both completed at this point */
	test0 = (dep->start_cmd_status == 0);
	test1 = (cmd_status == 0);

	if (!test0 && test1)
		dep->combo_num = 1;
	else if (!test0 && !test1)
		dep->combo_num = 2;
	else if (test0 && !test1)
		dep->combo_num = 3;
	else if (test0 && test1)
		dep->combo_num = 0;

	/* Patch the deduced BIT[15:14] into the frame number and kick off. */
	dep->frame_number &= 0x3fff;
	dep->frame_number |= dep->combo_num << 14;
	dep->frame_number += max_t(u32, 4, dep->interval);

	/* Reinitialize test variables */
	dep->start_cmd_status = 0;
	dep->combo_num = 0;

	return __dwc3_gadget_kick_transfer(dep);
}

/*
 * __dwc3_gadget_start_isoc - start an isochronous transfer
 * @dep: isoc endpoint with at least one pending request
 *
 * Retries the kick with progressively later aligned frame numbers,
 * since START TRANSFER fails with bus-expiry when the chosen
 * (micro)frame has already passed. Returns 0 on success or a negative
 * errno (-EAGAIN when nothing is pending or all retries expired).
 */
static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;
	int i;

	if (list_empty(&dep->pending_list)) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return -EAGAIN;
	}

	/*
	 * Affected DWC_usb31 revisions report an invalid microframe number
	 * for HS/FS isoc IN; see dwc3_gadget_start_isoc_quirk() (Synopsys
	 * STAR 9001202023).
	 */
	if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) &&
	    (dwc->revision <= DWC3_USB31_REVISION_160A ||
	     (dwc->revision == DWC3_USB31_REVISION_170A &&
	      dwc->version_type >= DWC31_VERSIONTYPE_EA01 &&
	      dwc->version_type <= DWC31_VERSIONTYPE_EA06))) {

		if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
			return dwc3_gadget_start_isoc_quirk(dep);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}

/*
 * __dwc3_gadget_ep_queue - queue a request on an endpoint
 * @dep: endpoint to queue on (must be enabled)
 * @req: request to queue
 *
 * Caller should take care of locking. Returns 0 on success, -ESHUTDOWN
 * if the endpoint is disabled, or -EINVAL if the request belongs to a
 * different endpoint.
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
				!(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return 0;

		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
			if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
				return __dwc3_gadget_start_isoc(dep);
			}
		}
	}

	return __dwc3_gadget_kick_transfer(dep);
}

/* usb_ep_ops.queue callback: locked wrapper around __dwc3_gadget_ep_queue. */
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
{
	int i;

	/*
	 * If request was already started, this means we had to
	 * stop the transfer.
With that we also need to ignore 1495 * all TRBs used by the request, however TRBs can only 1496 * be modified after completion of END_TRANSFER 1497 * command. So what we do here is that we wait for 1498 * END_TRANSFER completion and only after that, we jump 1499 * over TRBs by clearing HWO and incrementing dequeue 1500 * pointer. 1501 */ 1502 for (i = 0; i < req->num_trbs; i++) { 1503 struct dwc3_trb *trb; 1504 1505 trb = req->trb + i; 1506 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 1507 dwc3_ep_inc_deq(dep); 1508 } 1509 } 1510 1511 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep) 1512 { 1513 struct dwc3_request *req; 1514 struct dwc3_request *tmp; 1515 1516 list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) { 1517 dwc3_gadget_ep_skip_trbs(dep, req); 1518 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1519 } 1520 } 1521 1522 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1523 struct usb_request *request) 1524 { 1525 struct dwc3_request *req = to_dwc3_request(request); 1526 struct dwc3_request *r = NULL; 1527 1528 struct dwc3_ep *dep = to_dwc3_ep(ep); 1529 struct dwc3 *dwc = dep->dwc; 1530 1531 unsigned long flags; 1532 int ret = 0; 1533 1534 trace_dwc3_ep_dequeue(req); 1535 1536 spin_lock_irqsave(&dwc->lock, flags); 1537 1538 list_for_each_entry(r, &dep->pending_list, list) { 1539 if (r == req) 1540 break; 1541 } 1542 1543 if (r != req) { 1544 list_for_each_entry(r, &dep->started_list, list) { 1545 if (r == req) 1546 break; 1547 } 1548 if (r == req) { 1549 /* wait until it is processed */ 1550 dwc3_stop_active_transfer(dep, true); 1551 1552 if (!r->trb) 1553 goto out0; 1554 1555 dwc3_gadget_move_cancelled_request(req); 1556 goto out0; 1557 } 1558 dev_err(dwc->dev, "request %pK was not queued to %s\n", 1559 request, ep->name); 1560 ret = -EINVAL; 1561 goto out0; 1562 } 1563 1564 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1565 1566 out0: 1567 spin_unlock_irqrestore(&dwc->lock, flags); 1568 1569 return ret; 1570 } 1571 1572 int 
__dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1573 { 1574 struct dwc3_gadget_ep_cmd_params params; 1575 struct dwc3 *dwc = dep->dwc; 1576 int ret; 1577 1578 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1579 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1580 return -EINVAL; 1581 } 1582 1583 memset(¶ms, 0x00, sizeof(params)); 1584 1585 if (value) { 1586 struct dwc3_trb *trb; 1587 1588 unsigned transfer_in_flight; 1589 unsigned started; 1590 1591 if (dep->number > 1) 1592 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1593 else 1594 trb = &dwc->ep0_trb[dep->trb_enqueue]; 1595 1596 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; 1597 started = !list_empty(&dep->started_list); 1598 1599 if (!protocol && ((dep->direction && transfer_in_flight) || 1600 (!dep->direction && started))) { 1601 return -EAGAIN; 1602 } 1603 1604 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL, 1605 ¶ms); 1606 if (ret) 1607 dev_err(dwc->dev, "failed to set STALL on %s\n", 1608 dep->name); 1609 else 1610 dep->flags |= DWC3_EP_STALL; 1611 } else { 1612 1613 ret = dwc3_send_clear_stall_ep_cmd(dep); 1614 if (ret) 1615 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1616 dep->name); 1617 else 1618 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1619 } 1620 1621 return ret; 1622 } 1623 1624 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1625 { 1626 struct dwc3_ep *dep = to_dwc3_ep(ep); 1627 struct dwc3 *dwc = dep->dwc; 1628 1629 unsigned long flags; 1630 1631 int ret; 1632 1633 spin_lock_irqsave(&dwc->lock, flags); 1634 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1635 spin_unlock_irqrestore(&dwc->lock, flags); 1636 1637 return ret; 1638 } 1639 1640 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1641 { 1642 struct dwc3_ep *dep = to_dwc3_ep(ep); 1643 struct dwc3 *dwc = dep->dwc; 1644 unsigned long flags; 1645 int ret; 1646 1647 spin_lock_irqsave(&dwc->lock, flags); 1648 dep->flags |= DWC3_EP_WEDGE; 1649 1650 if 
(dep->number == 0 || dep->number == 1) 1651 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1652 else 1653 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1654 spin_unlock_irqrestore(&dwc->lock, flags); 1655 1656 return ret; 1657 } 1658 1659 /* -------------------------------------------------------------------------- */ 1660 1661 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1662 .bLength = USB_DT_ENDPOINT_SIZE, 1663 .bDescriptorType = USB_DT_ENDPOINT, 1664 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1665 }; 1666 1667 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1668 .enable = dwc3_gadget_ep0_enable, 1669 .disable = dwc3_gadget_ep0_disable, 1670 .alloc_request = dwc3_gadget_ep_alloc_request, 1671 .free_request = dwc3_gadget_ep_free_request, 1672 .queue = dwc3_gadget_ep0_queue, 1673 .dequeue = dwc3_gadget_ep_dequeue, 1674 .set_halt = dwc3_gadget_ep0_set_halt, 1675 .set_wedge = dwc3_gadget_ep_set_wedge, 1676 }; 1677 1678 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1679 .enable = dwc3_gadget_ep_enable, 1680 .disable = dwc3_gadget_ep_disable, 1681 .alloc_request = dwc3_gadget_ep_alloc_request, 1682 .free_request = dwc3_gadget_ep_free_request, 1683 .queue = dwc3_gadget_ep_queue, 1684 .dequeue = dwc3_gadget_ep_dequeue, 1685 .set_halt = dwc3_gadget_ep_set_halt, 1686 .set_wedge = dwc3_gadget_ep_set_wedge, 1687 }; 1688 1689 /* -------------------------------------------------------------------------- */ 1690 1691 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1692 { 1693 struct dwc3 *dwc = gadget_to_dwc(g); 1694 1695 return __dwc3_gadget_get_frame(dwc); 1696 } 1697 1698 static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1699 { 1700 int retries; 1701 1702 int ret; 1703 u32 reg; 1704 1705 u8 link_state; 1706 u8 speed; 1707 1708 /* 1709 * According to the Databook Remote wakeup request should 1710 * be issued only when the device is in early suspend state. 1711 * 1712 * We can check that via USB Link State bits in DSTS register. 
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	/* Remote wakeup is a USB2 concept; nothing to do at SS speeds. */
	speed = reg & DWC3_DSTS_CONNECTSPD;
	if ((speed == DWC3_DSTS_SUPERSPEED) ||
	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
		return 0;

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}

/* usb_gadget_ops.wakeup callback: locked wrapper. */
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_wakeup(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_gadget_ops.set_selfpowered callback. */
static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

/*
 * dwc3_gadget_run_stop - set or clear the Run/Stop bit in DCTL
 * @dwc: pointer to our context structure
 * @is_on: nonzero to start the controller, zero to halt it
 * @suspend: true when stopping on the way into hibernation, in which
 *	case KEEP_CONNECT stays set
 *
 * Polls DSTS.DEVCTRLHLT until the controller acknowledges the change.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		/* Old cores need the target link state forced to RX_DET. */
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* Wait until DEVCTRLHLT reflects the requested Run/Stop state. */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

/* usb_gadget_ops.pullup callback. */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	is_on = !!is_on;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 */
	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
			return -ETIMEDOUT;
		}
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	/* Link-state-change events are only needed on older cores. */
	if (dwc->revision < DWC3_REVISION_250A)
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/**
 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
 * @dwc: pointer to our context structure
 *
 * The following looks like complex but it's actually very simple. In order to
 * calculate the number of packets we can burst at once on OUT transfers, we're
 * gonna use RxFIFO size.
1901 * 1902 * To calculate RxFIFO size we need two numbers: 1903 * MDWIDTH = size, in bits, of the internal memory bus 1904 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits) 1905 * 1906 * Given these two numbers, the formula is simple: 1907 * 1908 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16; 1909 * 1910 * 24 bytes is for 3x SETUP packets 1911 * 16 bytes is a clock domain crossing tolerance 1912 * 1913 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024; 1914 */ 1915 static void dwc3_gadget_setup_nump(struct dwc3 *dwc) 1916 { 1917 u32 ram2_depth; 1918 u32 mdwidth; 1919 u32 nump; 1920 u32 reg; 1921 1922 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7); 1923 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0); 1924 1925 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024; 1926 nump = min_t(u32, nump, 16); 1927 1928 /* update NumP */ 1929 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1930 reg &= ~DWC3_DCFG_NUMP_MASK; 1931 reg |= nump << DWC3_DCFG_NUMP_SHIFT; 1932 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1933 } 1934 1935 static int __dwc3_gadget_start(struct dwc3 *dwc) 1936 { 1937 struct dwc3_ep *dep; 1938 int ret = 0; 1939 u32 reg; 1940 1941 /* 1942 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if 1943 * the core supports IMOD, disable it. 1944 */ 1945 if (dwc->imod_interval) { 1946 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 1947 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 1948 } else if (dwc3_has_imod(dwc)) { 1949 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); 1950 } 1951 1952 /* 1953 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP 1954 * field instead of letting dwc3 itself calculate that automatically. 1955 * 1956 * This way, we maximize the chances that we'll be able to get several 1957 * bursts of data without going through any sort of endpoint throttling. 
1958 */ 1959 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG); 1960 if (dwc3_is_usb31(dwc)) 1961 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL; 1962 else 1963 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL; 1964 1965 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg); 1966 1967 dwc3_gadget_setup_nump(dwc); 1968 1969 /* Start with SuperSpeed Default */ 1970 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1971 1972 dep = dwc->eps[0]; 1973 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1974 if (ret) { 1975 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1976 goto err0; 1977 } 1978 1979 dep = dwc->eps[1]; 1980 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); 1981 if (ret) { 1982 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1983 goto err1; 1984 } 1985 1986 /* begin to receive SETUP packets */ 1987 dwc->ep0state = EP0_SETUP_PHASE; 1988 dwc->link_state = DWC3_LINK_STATE_SS_DIS; 1989 dwc3_ep0_out_start(dwc); 1990 1991 dwc3_gadget_enable_irq(dwc); 1992 1993 return 0; 1994 1995 err1: 1996 __dwc3_gadget_ep_disable(dwc->eps[0]); 1997 1998 err0: 1999 return ret; 2000 } 2001 2002 static int dwc3_gadget_start(struct usb_gadget *g, 2003 struct usb_gadget_driver *driver) 2004 { 2005 struct dwc3 *dwc = gadget_to_dwc(g); 2006 unsigned long flags; 2007 int ret = 0; 2008 int irq; 2009 2010 irq = dwc->irq_gadget; 2011 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 2012 IRQF_SHARED, "dwc3", dwc->ev_buf); 2013 if (ret) { 2014 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 2015 irq, ret); 2016 goto err0; 2017 } 2018 2019 spin_lock_irqsave(&dwc->lock, flags); 2020 if (dwc->gadget_driver) { 2021 dev_err(dwc->dev, "%s is already bound to %s\n", 2022 dwc->gadget.name, 2023 dwc->gadget_driver->driver.name); 2024 ret = -EBUSY; 2025 goto err1; 2026 } 2027 2028 dwc->gadget_driver = driver; 2029 2030 if (pm_runtime_active(dwc->dev)) 2031 __dwc3_gadget_start(dwc); 2032 2033 spin_unlock_irqrestore(&dwc->lock, flags); 2034 2035 return 0; 2036 2037 err1: 
2038 spin_unlock_irqrestore(&dwc->lock, flags); 2039 free_irq(irq, dwc); 2040 2041 err0: 2042 return ret; 2043 } 2044 2045 static void __dwc3_gadget_stop(struct dwc3 *dwc) 2046 { 2047 dwc3_gadget_disable_irq(dwc); 2048 __dwc3_gadget_ep_disable(dwc->eps[0]); 2049 __dwc3_gadget_ep_disable(dwc->eps[1]); 2050 } 2051 2052 static int dwc3_gadget_stop(struct usb_gadget *g) 2053 { 2054 struct dwc3 *dwc = gadget_to_dwc(g); 2055 unsigned long flags; 2056 2057 spin_lock_irqsave(&dwc->lock, flags); 2058 2059 if (pm_runtime_suspended(dwc->dev)) 2060 goto out; 2061 2062 __dwc3_gadget_stop(dwc); 2063 2064 out: 2065 dwc->gadget_driver = NULL; 2066 spin_unlock_irqrestore(&dwc->lock, flags); 2067 2068 free_irq(dwc->irq_gadget, dwc->ev_buf); 2069 2070 return 0; 2071 } 2072 2073 static void dwc3_gadget_set_speed(struct usb_gadget *g, 2074 enum usb_device_speed speed) 2075 { 2076 struct dwc3 *dwc = gadget_to_dwc(g); 2077 unsigned long flags; 2078 u32 reg; 2079 2080 spin_lock_irqsave(&dwc->lock, flags); 2081 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2082 reg &= ~(DWC3_DCFG_SPEED_MASK); 2083 2084 /* 2085 * WORKAROUND: DWC3 revision < 2.20a have an issue 2086 * which would cause metastability state on Run/Stop 2087 * bit if we try to force the IP to USB2-only mode. 
2088 * 2089 * Because of that, we cannot configure the IP to any 2090 * speed other than the SuperSpeed 2091 * 2092 * Refers to: 2093 * 2094 * STAR#9000525659: Clock Domain Crossing on DCTL in 2095 * USB 2.0 Mode 2096 */ 2097 if (dwc->revision < DWC3_REVISION_220A && 2098 !dwc->dis_metastability_quirk) { 2099 reg |= DWC3_DCFG_SUPERSPEED; 2100 } else { 2101 switch (speed) { 2102 case USB_SPEED_LOW: 2103 reg |= DWC3_DCFG_LOWSPEED; 2104 break; 2105 case USB_SPEED_FULL: 2106 reg |= DWC3_DCFG_FULLSPEED; 2107 break; 2108 case USB_SPEED_HIGH: 2109 reg |= DWC3_DCFG_HIGHSPEED; 2110 break; 2111 case USB_SPEED_SUPER: 2112 reg |= DWC3_DCFG_SUPERSPEED; 2113 break; 2114 case USB_SPEED_SUPER_PLUS: 2115 if (dwc3_is_usb31(dwc)) 2116 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2117 else 2118 reg |= DWC3_DCFG_SUPERSPEED; 2119 break; 2120 default: 2121 dev_err(dwc->dev, "invalid speed (%d)\n", speed); 2122 2123 if (dwc->revision & DWC3_REVISION_IS_DWC31) 2124 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2125 else 2126 reg |= DWC3_DCFG_SUPERSPEED; 2127 } 2128 } 2129 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2130 2131 spin_unlock_irqrestore(&dwc->lock, flags); 2132 } 2133 2134 static const struct usb_gadget_ops dwc3_gadget_ops = { 2135 .get_frame = dwc3_gadget_get_frame, 2136 .wakeup = dwc3_gadget_wakeup, 2137 .set_selfpowered = dwc3_gadget_set_selfpowered, 2138 .pullup = dwc3_gadget_pullup, 2139 .udc_start = dwc3_gadget_start, 2140 .udc_stop = dwc3_gadget_stop, 2141 .udc_set_speed = dwc3_gadget_set_speed, 2142 }; 2143 2144 /* -------------------------------------------------------------------------- */ 2145 2146 static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep) 2147 { 2148 struct dwc3 *dwc = dep->dwc; 2149 2150 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2151 dep->endpoint.maxburst = 1; 2152 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2153 if (!dep->direction) 2154 dwc->gadget.ep0 = &dep->endpoint; 2155 2156 dep->endpoint.caps.type_control = true; 2157 2158 return 0; 2159 } 2160 2161 
/*
 * Initialize an IN endpoint.  The usable maxpacket limit is derived from
 * the TX FIFO depth read back from GTXFIFOSIZn, minus the documented
 * per-kilobyte internal overhead.
 */
static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int mdwidth;
	int kbytes;
	int size;

	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth /= 8;

	/* FIFO number is the endpoint number without the direction bit */
	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
	if (dwc3_is_usb31(dwc))
		size = DWC31_GTXFIFOSIZ_TXFDEF(size);
	else
		size = DWC3_GTXFIFOSIZ_TXFDEF(size);

	/* FIFO Depth is in MDWDITH bytes. Multiply */
	size *= mdwidth;

	kbytes = size / 1024;
	if (kbytes == 0)
		kbytes = 1;

	/*
	 * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for
	 * internal overhead. We don't really know how these are used,
	 * but documentation say it exists.
	 */
	size -= mdwidth * (kbytes + 1);
	size /= kbytes;

	usb_ep_set_maxpacket_limit(&dep->endpoint, size);

	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

/*
 * Initialize an OUT endpoint.  RX FIFO sizing does not constrain
 * maxpacket the way TX does, so the USB3 maximum of 1024 is used.
 */
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

/*
 * Allocate and initialize one physical endpoint.  Physical endpoint
 * numbering interleaves direction: even epnum = OUT, odd epnum = IN;
 * the logical endpoint number is epnum >> 1.
 */
static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
	struct dwc3_ep *dep;
	bool direction = epnum & 1;
	int ret;
	u8 num = epnum >> 1;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return -ENOMEM;

	dep->dwc = dwc;
	dep->number = epnum;
	dep->direction = direction;
	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
	dwc->eps[epnum] = dep;
	dep->combo_num = 0;
	dep->start_cmd_status = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
			direction ? "in" : "out");

	dep->endpoint.name = dep->name;

	/* physical eps 0 and 1 share the fixed ep0 descriptor */
	if (!(dep->number > 1)) {
		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
		dep->endpoint.comp_desc = NULL;
	}

	spin_lock_init(&dep->lock);

	if (num == 0)
		ret = dwc3_gadget_init_control_endpoint(dep);
	else if (direction)
		ret = dwc3_gadget_init_in_endpoint(dep);
	else
		ret = dwc3_gadget_init_out_endpoint(dep);

	if (ret)
		return ret;

	dep->endpoint.caps.dir_in = direction;
	dep->endpoint.caps.dir_out = !direction;

	INIT_LIST_HEAD(&dep->pending_list);
	INIT_LIST_HEAD(&dep->started_list);
	INIT_LIST_HEAD(&dep->cancelled_list);

	return 0;
}

/* Create all 'total' physical endpoints and the gadget's ep_list. */
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < total; epnum++) {
		int ret;

		ret = dwc3_gadget_init_endpoint(dwc, epnum);
		if (ret)
			return ret;
	}

	return 0;
}

/* Tear down everything dwc3_gadget_init_endpoints() created. */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them the endpoints list. Due to that, we
		 * shouldn't do these two operations otherwise we would end up
		 * with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

/*
 * Reclaim one completed TRB for @req.  Advances the dequeue pointer and
 * accumulates the residual byte count into req->remaining.  Returns
 * non-zero when reclaiming for this request should stop (hardware still
 * owns the TRB, short packet, or IOC).
 */
static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;

	/*
	 * If we're in the middle of series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * where CHN bit is zero. DWC3 will also avoid clearing HWO
	 * bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * If we're dealing with unaligned size OUT transfer, we will be left
	 * with one TRB pending in the ring. We need to manually clear HWO bit
	 * from that TRB.
	 */

	if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		return 1;
	}

	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;

	/* hardware still owns this TRB: stop, unless we're shutting down */
	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	if (event->status & DEPEVT_STATUS_SHORT && !chain)
		return 1;

	if (event->status & DEPEVT_STATUS_IOC)
		return 1;

	return 0;
}

/*
 * Reclaim completed TRBs for a scatter-gather request, one TRB per
 * pending sg entry, until reclaim says stop or hardware still owns a TRB.
 */
static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	unsigned int pending = req->num_pending_sgs;
	unsigned int i;
	int ret = 0;

	for_each_sg(sg, s, pending, i) {
		trb = &dep->trb_pool[dep->trb_dequeue];

		if (trb->ctrl & DWC3_TRB_CTRL_HWO)
			break;

		req->sg = sg_next(s);
		req->num_pending_sgs--;

		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
				trb, event, status, true);
		if (ret)
			break;
	}

	return ret;
}

/* Reclaim the single TRB of a non-sg (linear) request. */
static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];

	return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
			event, status, false);
}

/* A request is done once every requested byte has been transferred. */
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
	return req->request.actual == req->request.length;
}

/*
 * Reclaim TRBs for one started request and either give it back to the
 * gadget driver or, for a partially-completed sg request, restart the
 * transfer.  Returns the reclaim status (non-zero = stop walking).
 */
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event,
		struct dwc3_request *req, int status)
{
	int ret;

	if (req->num_pending_sgs)
		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
				status);
	else
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);

	/* unaligned/ZLP transfers queue one extra TRB -- reclaim it too */
	if (req->needs_extra_trb) {
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
		req->needs_extra_trb = false;
	}

	req->request.actual = req->request.length - req->remaining;

	if (!dwc3_gadget_ep_request_completed(req) &&
			req->num_pending_sgs) {
		__dwc3_gadget_kick_transfer(dep);
		goto out;
	}

	dwc3_gadget_giveback(dep, req, status);

out:
	return ret;
}

/* Walk the started list, completing requests until reclaim says stop. */
static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_request *tmp;

	list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
		int ret;

		ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
				req, status);
		if (ret)
			break;
	}
}

/* Cache the (micro)frame number the event was reported in. */
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dep->frame_number = event->parameters;
}

/*
 * Handle XferInProgress: complete finished requests, translating event
 * status bits into a request status (-ECONNRESET on bus error, -EXDEV on
 * missed isoc intervals).
 */
static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	unsigned status = 0;
	bool stop = false;

	dwc3_gadget_endpoint_frame_from_event(dep, event);

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
		status = -EXDEV;

		/* nothing left to transfer: end the active isoc transfer */
		if (list_empty(&dep->started_list))
			stop = true;
	}

	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);

	if (stop) {
		dwc3_stop_active_transfer(dep, true);
		dep->flags = DWC3_EP_ENABLED;
	}

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32 reg;
		int i;

		/* only restore U1/U2 once no endpoint has work pending */
		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->started_list))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

/* Handle XferNotReady: (re)start the isochronous transfer. */
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dwc3_gadget_endpoint_frame_from_event(dep, event);
	(void) __dwc3_gadget_start_isoc(dep);
}

/* Dispatch an endpoint event to the matching handler. */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;
	u8 cmd;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			return;

		/* Handle only EPCMDCMPLT when EP disabled */
		if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
			return;
	}

	/* physical eps 0/1 are handled entirely by the ep0 state machine */
	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_gadget_endpoint_transfer_in_progress(dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_gadget_endpoint_transfer_not_ready(dep, event);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		cmd = DEPEVT_PARAMETER_CMD(event->parameters);

		/* EndTransfer completed: now safe to give back cancelled reqs */
		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
			dwc3_gadget_ep_cleanup_cancelled_requests(dep);
		}
		break;
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_XFERCOMPLETE:
	case DWC3_DEPEVT_RXTXFIFOEVT:
		break;
	}
}

/*
 * The three helpers below drop dwc->lock around the gadget-driver
 * callback since the callback may re-enter this driver.
 */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/* Notify UDC core of a bus reset, but only if we were connected. */
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}

/*
 * Issue an EndTransfer command for the endpoint's active transfer, if
 * any.  @force sets the ForceRM bit.  See the long NOTICE below for why
 * we do not wait for the command-complete interrupt.
 */
static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
	    !dep->resource_index)
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
2648 * In short, what we're doing is: 2649 * 2650 * - Issue EndTransfer WITH CMDIOC bit set 2651 * - Wait 100us 2652 * 2653 * As of IP version 3.10a of the DWC_usb3 IP, the controller 2654 * supports a mode to work around the above limitation. The 2655 * software can poll the CMDACT bit in the DEPCMD register 2656 * after issuing a EndTransfer command. This mode is enabled 2657 * by writing GUCTL2[14]. This polling is already done in the 2658 * dwc3_send_gadget_ep_cmd() function so if the mode is 2659 * enabled, the EndTransfer command will have completed upon 2660 * returning from this function and we don't need to delay for 2661 * 100us. 2662 * 2663 * This mode is NOT available on the DWC_usb31 IP. 2664 */ 2665 2666 cmd = DWC3_DEPCMD_ENDTRANSFER; 2667 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 2668 cmd |= DWC3_DEPCMD_CMDIOC; 2669 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2670 memset(¶ms, 0, sizeof(params)); 2671 ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); 2672 WARN_ON_ONCE(ret); 2673 dep->resource_index = 0; 2674 2675 if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { 2676 dep->flags |= DWC3_EP_END_TRANSFER_PENDING; 2677 udelay(100); 2678 } 2679 } 2680 2681 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2682 { 2683 u32 epnum; 2684 2685 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2686 struct dwc3_ep *dep; 2687 int ret; 2688 2689 dep = dwc->eps[epnum]; 2690 if (!dep) 2691 continue; 2692 2693 if (!(dep->flags & DWC3_EP_STALL)) 2694 continue; 2695 2696 dep->flags &= ~DWC3_EP_STALL; 2697 2698 ret = dwc3_send_clear_stall_ep_cmd(dep); 2699 WARN_ON_ONCE(ret); 2700 } 2701 } 2702 2703 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2704 { 2705 int reg; 2706 2707 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2708 reg &= ~DWC3_DCTL_INITU1ENA; 2709 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2710 2711 reg &= ~DWC3_DCTL_INITU2ENA; 2712 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2713 2714 dwc3_disconnect_gadget(dwc); 2715 2716 
dwc->gadget.speed = USB_SPEED_UNKNOWN; 2717 dwc->setup_packet_pending = false; 2718 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2719 2720 dwc->connected = false; 2721 } 2722 2723 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2724 { 2725 u32 reg; 2726 2727 dwc->connected = true; 2728 2729 /* 2730 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2731 * would cause a missing Disconnect Event if there's a 2732 * pending Setup Packet in the FIFO. 2733 * 2734 * There's no suggested workaround on the official Bug 2735 * report, which states that "unless the driver/application 2736 * is doing any special handling of a disconnect event, 2737 * there is no functional issue". 2738 * 2739 * Unfortunately, it turns out that we _do_ some special 2740 * handling of a disconnect event, namely complete all 2741 * pending transfers, notify gadget driver of the 2742 * disconnection, and so on. 2743 * 2744 * Our suggested workaround is to follow the Disconnect 2745 * Event steps here, instead, based on a setup_packet_pending 2746 * flag. Such flag gets set whenever we have a SETUP_PENDING 2747 * status for EP0 TRBs and gets cleared on XferComplete for the 2748 * same endpoint. 
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	/* leave any USB2 test mode */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

/*
 * Handle ConnectDone: read back the negotiated speed from DSTS, size
 * ep0 accordingly, optionally advertise USB2 LPM, and re-enable both
 * halves of ep0 with the "modify" action so their state is kept.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value. If any platform
	 * wants to set this to a different value, we need to add a
	 * setting and update GCTL.RAMCLKSEL here.
	 */

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DSTS_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* keep the IN half of ep0 in sync with the negotiated size */
	dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

/* Handle a Wakeup event by forwarding it as resume to the gadget driver. */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/*
 * Handle a Link State Change event.  Applies two revision-specific
 * workarounds, then maps U1/U2/U3/RESUME to suspend/resume callbacks.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* remember what to restore in the 2nd half */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* forward link-state transitions to the gadget driver */
	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

/* Handle a Suspend event; notify only on a real transition into U3. */
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
		dwc3_suspend_gadget(dwc);

	dwc->link_state = next;
}

/* Handle a Hibernation Request event (actual entry not implemented). */
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revison 2.20a with hibernation support
	 * have a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

/* Dispatch one device-specific (non-endpoint) event. */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		/* It changed to be suspend event for version 2.30a and above */
		if (dwc->revision >= DWC3_REVISION_230A) {
			/*
			 * Ignore suspend event until the gadget enters into
			 * USB_STATE_CONFIGURED state.
			 */
			if (dwc->gadget.state >= USB_STATE_CONFIGURED)
				dwc3_gadget_suspend_interrupt(dwc,
						event->event_info);
		}
		break;
	case DWC3_DEVICE_EVENT_SOF:
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
	case DWC3_DEVICE_EVENT_CMD_CMPL:
	case DWC3_DEVICE_EVENT_OVERFLOW:
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

/* Route one cached event entry to the endpoint or device handler. */
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw, dwc);

	if (!event->type.is_devspec)
		dwc3_endpoint_interrupt(dwc, &event->depevt);
	else if (event->type.type == DWC3_EVENT_TYPE_DEV)
		dwc3_gadget_interrupt(dwc, &event->devt);
	else
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
}

/*
 * Drain the cached event buffer (filled by the hard-irq handler), then
 * unmask the event interrupt and re-arm interrupt moderation.  Runs in
 * the threaded handler with dwc->lock held.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->cache + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the a
		 * boundary so I worry about that once we try to handle
		 * that.
		 */
		evt->lpos = (evt->lpos + 4) % evt->length;
		left -= 4;
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}

	return ret;
}

/* Threaded IRQ handler: process cached events under the controller lock. */
static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;
	struct dwc3 *dwc = evt->dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_process_event_buf(evt);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/*
 * Hard-IRQ half: snapshot pending events into evt->cache, mask the
 * event interrupt and wake the thread.  When runtime-suspended, just
 * record that events are pending and let the resume path handle them.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 amount;
	u32 count;
	u32 reg;

	if (pm_runtime_suspended(dwc->dev)) {
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		dwc->pending_events = true;
		return IRQ_HANDLED;
	}

	/*
	 * With PCIe legacy interrupt, test shows that top-half irq handler can
	 * be called again after HW interrupt deassertion. Check if bottom-half
	 * irq event handler completes before caching new event to prevent
	 * losing events.
	 */
	if (evt->flags & DWC3_EVENT_PENDING)
		return IRQ_HANDLED;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	/* copy out of the circular buffer, handling wrap-around */
	amount = min(count, evt->length - evt->lpos);
	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);

	if (amount < count)
		memcpy(evt->cache, evt->buf, count - amount);

	/* acknowledge the events we just consumed */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;

	return dwc3_check_event_buf(evt);
}

/*
 * Look up the peripheral IRQ, trying the named platform resources first
 * and falling back to the first platform IRQ.  Returns a negative errno
 * on failure; -EPROBE_DEFER is propagated without logging.
 */
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
	int irq;

	irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
	if (irq > 0)
		goto out;

	if (irq == -EPROBE_DEFER)
		goto out;

	irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
	if (irq > 0)
		goto out;

	if (irq == -EPROBE_DEFER)
		goto out;

	irq = platform_get_irq(dwc3_pdev, 0);
	if (irq > 0)
		goto out;

	if (irq != -EPROBE_DEFER)
		dev_err(dwc->dev, "missing peripheral IRQ\n");

	if (!irq)
		irq = -EINVAL;

out:
	return irq;
}

/**
 * dwc3_gadget_init - initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
3260 */ 3261 int dwc3_gadget_init(struct dwc3 *dwc) 3262 { 3263 int ret; 3264 int irq; 3265 3266 irq = dwc3_gadget_get_irq(dwc); 3267 if (irq < 0) { 3268 ret = irq; 3269 goto err0; 3270 } 3271 3272 dwc->irq_gadget = irq; 3273 3274 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 3275 sizeof(*dwc->ep0_trb) * 2, 3276 &dwc->ep0_trb_addr, GFP_KERNEL); 3277 if (!dwc->ep0_trb) { 3278 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 3279 ret = -ENOMEM; 3280 goto err0; 3281 } 3282 3283 dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL); 3284 if (!dwc->setup_buf) { 3285 ret = -ENOMEM; 3286 goto err1; 3287 } 3288 3289 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3290 &dwc->bounce_addr, GFP_KERNEL); 3291 if (!dwc->bounce) { 3292 ret = -ENOMEM; 3293 goto err2; 3294 } 3295 3296 init_completion(&dwc->ep0_in_setup); 3297 3298 dwc->gadget.ops = &dwc3_gadget_ops; 3299 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3300 dwc->gadget.sg_supported = true; 3301 dwc->gadget.name = "dwc3-gadget"; 3302 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; 3303 3304 /* 3305 * FIXME We might be setting max_speed to <SUPER, however versions 3306 * <2.20a of dwc3 have an issue with metastability (documented 3307 * elsewhere in this driver) which tells us we can't set max speed to 3308 * anything lower than SUPER. 3309 * 3310 * Because gadget.max_speed is only used by composite.c and function 3311 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3312 * to happen so we avoid sending SuperSpeed Capability descriptor 3313 * together with our BOS descriptor as that could confuse host into 3314 * thinking we can handle super speed. 3315 * 3316 * Note that, in fact, we won't even support GetBOS requests when speed 3317 * is less than super speed because we don't have means, yet, to tell 3318 * composite.c that we are USB 2.0 + LPM ECN. 
 */
	if (dwc->revision < DWC3_REVISION_220A &&
	    !dwc->dis_metastability_quirk)
		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
			 dwc->revision);

	dwc->gadget.max_speed = dwc->maximum_speed;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
	if (ret)
		goto err3;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err4;
	}

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
err4:
	dwc3_gadget_free_endpoints(dwc);

err3:
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);

err2:
	kfree(dwc->setup_buf);

err1:
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);

err0:
	return ret;
}

/* -------------------------------------------------------------------------- */

/**
 * dwc3_gadget_exit - undoes everything dwc3_gadget_init() did
 * @dwc: pointer to our controller context structure
 *
 * Unregisters the UDC first so no new activity starts, then frees the
 * endpoints and the buffers allocated in dwc3_gadget_init().
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
	kfree(dwc->setup_buf);
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
}

/*
 * Suspend callback: a no-op unless a gadget driver is bound.  Stops the
 * controller, notifies the gadget driver of disconnect and tears down
 * the gadget, then waits for any in-flight interrupt handler to finish.
 * NOTE(review): locking appears to be the caller's responsibility here
 * — confirm against the core suspend path.
 */
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return 0;

	dwc3_gadget_run_stop(dwc, false, false);
	dwc3_disconnect_gadget(dwc);
	__dwc3_gadget_stop(dwc);

	/* Make sure the hard-IRQ handler is not still running. */
	synchronize_irq(dwc->irq_gadget);

	return 0;
}

/*
 * Resume callback: mirror of dwc3_gadget_suspend().  Restarts the
 * gadget and sets the run/stop bit again; unwinds the start on failure.
 */
int dwc3_gadget_resume(struct dwc3 *dwc)
{
	int ret;

	if (!dwc->gadget_driver)
		return 0;

	ret = __dwc3_gadget_start(dwc);
	if (ret < 0)
		goto err0;

	ret = dwc3_gadget_run_stop(dwc, true, false);
	if (ret
< 0)
		goto err1;

	return 0;

	/* Unwind the partial start on run/stop failure. */
err1:
	__dwc3_gadget_stop(dwc);

err0:
	return ret;
}

/**
 * dwc3_gadget_process_pending_events - replay events latched while IRQ was off
 * @dwc: pointer to our controller context structure
 *
 * If events were flagged as pending, run the interrupt handler once for
 * the event buffer, clear the pending flag and re-enable the gadget IRQ.
 * NOTE(review): presumably called on the resume path after the IRQ was
 * disabled — confirm against the callers.
 */
void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
	if (dwc->pending_events) {
		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
		dwc->pending_events = false;
		enable_irq(dwc->irq_gadget);
	}
}