/*
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if a wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}
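/*
 * Illustrative note (not from the Synopsys databook): the ch9 test selectors
 * used above are assumed to be the usual small integers (TEST_J == 1 through
 * TEST_FORCE_EN == 5), so "mode << 1" places the selector straight into the
 * DCTL.TSTCTRL field that was masked out above. For example, TEST_PACKET (4)
 * ends up as 4 << 1 == 0x08 in that field.
 */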
/**
 * dwc3_gadget_get_link_state - gets current state of usb link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to which the request belongs
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	req->started = false;
	list_del(&req->list);
	req->remaining = 0;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
				&req->request, req->direction);

	req->trb = NULL;

	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);

	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32		timeout = 500;
	int		status = 0;
	int		ret = 0;
	u32		reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3		*dwc = dep->dwc;
	u32			timeout = 500;
	u32			reg;

	int			cmd_status = 0;
	int			susphy = false;
	int			ret = -EINVAL;

	/*
	 * Synopsys Databook 2.60a states, in sections 6.3.2.5.[1-8], that if
	 * we're issuing an endpoint command, we must check if the
	 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
	 *
	 * We will also restore SUSPHY to what it was before returning, as
	 * stated by the same section of the Synopsys databook.
	 */
	if (dwc->gadget.speed <= USB_SPEED_HIGH) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			susphy = true;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		}
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int		needs_wakeup;

		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
				dwc->link_state == DWC3_LINK_STATE_U2 ||
				dwc->link_state == DWC3_LINK_STATE_U3);

		if (unlikely(needs_wakeup)) {
			ret = __dwc3_gadget_wakeup(dwc);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
		}
	}

	dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);

	/*
	 * Synopsys Databook 2.60a states in section 6.3.2.5.6 that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
			!usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (ret == 0) {
		switch (DWC3_DEPCMD_CMD(cmd)) {
		case DWC3_DEPCMD_STARTTRANSFER:
			dep->flags |= DWC3_EP_TRANSFER_STARTED;
			break;
		case DWC3_DEPCMD_ENDTRANSFER:
			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
			break;
		default:
			/* nothing */
			break;
		}
	}

	if (unlikely(susphy)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}

static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/*
	 * As of core revision 2.60a the recommended programming model
	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
	 * command for IN endpoints. This is to prevent an issue where
	 * some (non-compliant) hosts may not send ACK TPs for pending
	 * IN transfers due to a mishandled error condition. Synopsys
	 * STAR 9000614252.
	 */
	if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
	    (dwc->gadget.speed >= USB_SPEED_SUPER))
		cmd |= DWC3_DEPCMD_CLEARPENDIN;

	memset(&params, 0, sizeof(params));

	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}
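/*
 * Illustrative note, not part of the original driver text: each entry in the
 * pool is a fixed-size struct dwc3_trb (bpl, bph, size, ctrl -- 16 bytes if
 * all four members are 32-bit), so the pointer arithmetic above is the same
 * as trb_pool_dma + index * sizeof(struct dwc3_trb); under that assumption
 * the TRB at index 3 maps to trb_pool_dma + 48.
 */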
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);

/**
 * dwc3_gadget_start_config - configure ep resources
 * @dwc: pointer to our controller context structure
 * @dep: endpoint that is being enabled
 *
 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
 * completion, it will set Transfer Resource for all available endpoints.
 *
 * The assignment of transfer resources cannot perfectly follow the data book
 * due to the fact that the controller driver does not have all knowledge of the
 * configuration in advance. It is given this information piecemeal by the
 * composite gadget framework after every SET_CONFIGURATION and
 * SET_INTERFACE. Trying to follow the databook programming model in this
 * scenario can cause errors, for two reasons:
 *
 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
 * incorrect in the scenario of multiple interfaces.
 *
 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
 * endpoint on alt setting (8.1.6).
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this setting
 * will stay persistent until either a core reset or hibernation. So whenever we
 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled but is
 * triggered only when called for EP0-out, which always happens first, and which
 * should only happen in one of the above conditions.
 */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;
	int			i;
	int			ret;

	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dep = dwc->eps[i];

		if (!dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		bool modify, bool restore)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;

	if (dev_WARN_ONCE(dwc->dev, modify && restore,
					"Can't modify and restore\n"))
		return -EINVAL;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	if (modify) {
		params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
	} else if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	} else {
		params.param0 |= DWC3_DEPCFG_ACTION_INIT;
	}

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}

/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @modify: if true, modify existing endpoint configuration
 * @restore: if true, restore endpoint configuration from scratch buffer
 *
 * Caller should take care of locking.
 * Execute all necessary commands to initialize a HW endpoint so it can be
 * used by a gadget driver.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		bool modify, bool restore)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3		*dwc = dep->dwc;

	u32			reg;
	int			ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		init_waitqueue_head(&dep->wait_end_transfer);

		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
	 */
	if (usb_endpoint_xfer_bulk(desc)) {
		struct dwc3_gadget_ep_cmd_params params;
		struct dwc3_trb	*trb;
		dma_addr_t trb_dma;
		u32 cmd;

		memset(&params, 0, sizeof(params));
		trb = &dep->trb_pool[0];
		trb_dma = dwc3_trb_dma_offset(dep, trb);

		params.param0 = upper_32_bits(trb_dma);
		params.param1 = lower_32_bits(trb_dma);

		cmd = DWC3_DEPCMD_STARTTRANSFER;

		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
		if (ret < 0)
			return ret;

		dep->flags |= DWC3_EP_BUSY;

		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
		WARN_ON_ONCE(!dep->resource_index);
	}

out:
	trace_dwc3_gadget_ep_enable(dep);

	return 0;
}
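/*
 * Illustrative note, not from the databook: with DWC3_TRB_NUM entries per
 * pool, the enable path above leaves TRBs 0 .. DWC3_TRB_NUM - 2 available
 * for transfers while the last slot holds the link TRB, whose buffer pointer
 * refers back to the DMA address of TRB 0. The hardware therefore sees the
 * pool as a circular ring, which is why the enqueue/dequeue helpers skip the
 * final index when they wrap.
 */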
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);

static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	dwc3_stop_active_transfer(dwc, dep->number, true);

	/* - giveback all requests to gadget driver */
	while (!list_empty(&dep->started_list)) {
		req = next_request(&dep->started_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->pending_list)) {
		req = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - disables a hw endpoint
 * @dep: the endpoint to disable
 *
 * This function undoes what __dwc3_gadget_ep_enable did and also removes
 * requests which are currently being processed by the hardware and those which
 * are not yet scheduled.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	trace_dwc3_gadget_ep_disable(dep);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->type = 0;
	dep->flags &= DWC3_EP_END_TRANSFER_PENDING;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum	= dep->number;
	req->dep	= dep;

	dep->allocated_requests++;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	dep->allocated_requests--;
	trace_dwc3_free_request(req);
	kfree(req);
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);

static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
		dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
		unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
{
	struct dwc3		*dwc = dep->dwc;
	struct usb_gadget	*gadget = &dwc->gadget;
	enum usb_device_speed	speed = gadget->speed;

	dwc3_ep_inc_enq(dep);

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used. If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet. If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = ep->mult - 1;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (length <= (2 * maxp))
					mult--;

				if (length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/* always enable Continue on Short Packet */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	if ((!no_interrupt && !chain) ||
			(dwc3_calc_trbs_left(dep) == 0))
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}
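/*
 * Illustrative example, not part of the original source: assume a high-speed
 * isochronous endpoint with maxp == 1024 and ep->mult == 3 (up to three
 * transactions per microframe). For a 2000-byte first TRB, mult starts at 2,
 * the "length <= 2 * maxp" test drops it to 1 and the "length <= maxp" test
 * does not fire, so PCM1 == 1 and the packets go out as DATA1, DATA0,
 * matching case 2) of the comment above.
 */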
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @chain: should this TRB be chained to the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned chain, unsigned node)
{
	struct dwc3_trb		*trb;
	unsigned		length = req->request.length;
	unsigned		stream_id = req->request.stream_id;
	unsigned		short_not_ok = req->request.short_not_ok;
	unsigned		no_interrupt = req->request.no_interrupt;
	dma_addr_t		dma = req->request.dma;

	trb = &dep->trb_pool[dep->trb_enqueue];

	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		dep->queued_requests++;
	}

	__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
			stream_id, short_not_ok, no_interrupt);
}

/**
 * dwc3_ep_prev_trb - returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[tmp - 1];
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	struct dwc3_trb		*tmp;
	u8			trbs_left;

	/*
	 * If enqueue & dequeue are equal then it is either full or empty.
	 *
	 * One way to know for sure is if the TRB right before us has HWO bit
	 * set or not. If it has, then we're definitely full and can't fit any
	 * more transfers in our ring.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}
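/*
 * Worked example (illustrative only): with DWC3_TRB_NUM == 256, suppose
 * trb_enqueue == 250 and trb_dequeue == 5. Then (5 - 250) & 255 == 11, and
 * because dequeue < enqueue the free region wraps past the link TRB, so one
 * slot is subtracted and dwc3_calc_trbs_left() reports 10 usable TRBs. When
 * the two indices are equal, the HWO bit of the previous TRB is what
 * distinguishes a completely full ring from an empty one.
 */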
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	int		i;

	for_each_sg(sg, s, req->num_pending_sgs, i) {
		unsigned int length = req->request.length;
		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
		unsigned int rem = length % maxp;
		unsigned chain = true;

		if (sg_is_last(s))
			chain = false;

		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
			struct dwc3	*dwc = dep->dwc;
			struct dwc3_trb	*trb;

			req->unaligned = true;

			/* prepare normal TRB */
			dwc3_prepare_one_trb(dep, req, true, i);

			/* Now prepare one extra TRB to align transfer size */
			trb = &dep->trb_pool[dep->trb_enqueue];
			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
					maxp - rem, false, 0,
					req->request.stream_id,
					req->request.short_not_ok,
					req->request.no_interrupt);
		} else {
			dwc3_prepare_one_trb(dep, req, chain, i);
		}

		if (!dwc3_calc_trbs_left(dep))
			break;
	}
}

static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int length = req->request.length;
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = length % maxp;

	if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
		struct dwc3	*dwc = dep->dwc;
		struct dwc3_trb	*trb;

		req->unaligned = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to align transfer size */
		trb = &dep->trb_pool[dep->trb_enqueue];
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
				false, 0, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else if (req->request.zero && req->request.length &&
		   (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket))) {
		struct dwc3	*dwc = dep->dwc;
		struct dwc3_trb	*trb;

		req->zero = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to handle ZLP */
		trb = &dep->trb_pool[dep->trb_enqueue];
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
				false, 0, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else {
		dwc3_prepare_one_trb(dep, req, false, 0);
	}
}
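/*
 * Illustrative example, not part of the original file: for an OUT bulk
 * endpoint with maxp == 512 and a 700-byte linear request, rem == 188, so
 * the request's own TRB is chained to one extra TRB that points at the
 * driver's bounce buffer and covers the remaining 324 bytes, rounding the
 * total transfer size up to a multiple of wMaxPacketSize as the controller
 * requires for OUT transfers. The ZLP branch plays the same trick with a
 * zero-length TRB when the gadget asked for an explicit short packet.
 */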
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request	*req, *n;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	if (!dwc3_calc_trbs_left(dep))
		return;

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3	*dwc = dep->dwc;
		int		ret;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
						    dep->direction);
		if (ret)
			return;

		req->sg			= req->request.sg;
		req->num_pending_sgs	= req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	int				starting;
	int				ret;
	u32				cmd;

	starting = !(dep->flags & DWC3_EP_BUSY);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER |
			DWC3_DEPCMD_PARAM(cmd_param);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		if (req->trb)
			memset(req->trb, 0, sizeof(struct dwc3_trb));
		dep->queued_requests--;
		dwc3_gadget_giveback(dep, req, ret);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (starting) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	u32			reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->pending_list)) {
		dev_info(dwc->dev, "%s: ran out of requests\n",
				dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/*
	 * Schedule the first trb for one interval in the future or at
	 * least 4 microframes.
	 */
	uf = cur_uf + max_t(u32, 4, dep->interval);

	__dwc3_gadget_kick_transfer(dep, uf);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret = 0;

	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us to issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
			if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			} else {
				u32 cur_uf;

				cur_uf = __dwc3_gadget_get_frame(dwc);
				__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
				dep->flags &= ~DWC3_EP_PENDING_REQUEST;
			}
			return 0;
		}

		if ((dep->flags & DWC3_EP_BUSY) &&
		    !(dep->flags & DWC3_EP_MISSED_ISOC)) {
			WARN_ON_ONCE(!dep->resource_index);
			ret = __dwc3_gadget_kick_transfer(dep,
							  dep->resource_index);
		}

		goto out;
	}

	if (!dwc3_calc_trbs_left(dep))
		return 0;

	ret = __dwc3_gadget_kick_transfer(dep, 0);
out:
	if (ret == -EBUSY)
		ret = 0;

	return ret;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->started_list, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);

			/*
			 * If request was already started, this means we had to
			 * stop the transfer. With that we also need to ignore
			 * all TRBs used by the request, however TRBs can only
			 * be modified after completion of END_TRANSFER
			 * command. So what we do here is that we wait for
			 * END_TRANSFER completion and only after that, we jump
			 * over TRBs by clearing HWO and incrementing dequeue
			 * pointer.
			 *
			 * Note that we have 2 possible types of transfers here:
			 *
			 * i) Linear buffer request
			 * ii) SG-list based request
			 *
			 * SG-list based requests will have r->num_pending_sgs
			 * set to a valid number (> 0). Linear requests normally
			 * use a single TRB.
			 *
			 * For each of these two cases, if r->unaligned flag is
			 * set, one extra TRB has been used to align transfer
			 * size to wMaxPacketSize.
			 *
			 * All of these cases need to be taken into
			 * consideration so we don't mess up our TRB ring
			 * pointers.
			 */
			wait_event_lock_irq(dep->wait_end_transfer,
					!(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
					dwc->lock);

			if (!r->trb)
				goto out1;

			if (r->num_pending_sgs) {
				struct dwc3_trb *trb;
				int i = 0;

				for (i = 0; i < r->num_pending_sgs; i++) {
					trb = r->trb + i;
					trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
					dwc3_ep_inc_deq(dep);
				}

				if (r->unaligned || r->zero) {
					trb = r->trb + r->num_pending_sgs + 1;
					trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
					dwc3_ep_inc_deq(dep);
				}
			} else {
				struct dwc3_trb *trb = r->trb;

				trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
				dwc3_ep_inc_deq(dep);

				if (r->unaligned || r->zero) {
					trb = r->trb + 1;
					trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
					dwc3_ep_inc_deq(dep);
				}
			}
			goto out1;
		}
		dev_err(dwc->dev, "request %pK was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dep->queued_requests--;
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned transfer_in_flight;
		unsigned started;

		if (dep->flags & DWC3_EP_STALL)
			return 0;

		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		if (!(dep->flags & DWC3_EP_STALL))
			return 0;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
	unsigned long			flags;
	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	return __dwc3_gadget_get_frame(dwc);
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int			retries;

	int			ret;
	u32			reg;

	u8			link_state;
	u8			speed;

	/*
	 * According to the Databook, a Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if ((speed == DWC3_DSTS_SUPERSPEED) ||
	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
		return 0;

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_wakeup(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
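/*
 * Illustrative note, not in the original file: the poll condition above
 * reads DSTS.DEVCTRLHLT and keeps looping while "!(!is_on ^ !reg)" holds,
 * i.e. while the halted bit still matches the requested Run/Stop direction.
 * For is_on == 1 the loop waits for DEVCTRLHLT to clear (controller is
 * running); for is_on == 0 it waits for DEVCTRLHLT to be set (controller
 * has actually halted), for at most 500 iterations.
 */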
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	is_on = !!is_on;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 */
	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
			return -ETIMEDOUT;
		}
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	if (dwc->revision < DWC3_REVISION_250A)
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/**
 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
 * @dwc: pointer to our context structure
 *
 * The following looks complex but it's actually very simple. In order to
 * calculate the number of packets we can burst at once on OUT transfers,
 * we're gonna use RxFIFO size.
 *
 * To calculate RxFIFO size we need two numbers:
 * MDWIDTH = size, in bits, of the internal memory bus
 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
 *
 * Given these two numbers, the formula is simple:
 *
 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
 *
 * 24 bytes is for 3x SETUP packets
 * 16 bytes is a clock domain crossing tolerance
 *
 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
 */
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
	u32 ram2_depth;
	u32 mdwidth;
	u32 nump;
	u32 reg;

	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);

	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
	nump = min_t(u32, nump, 16);

	/* update NumP */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_NUMP_MASK;
	reg |= nump << DWC3_DCFG_NUMP_SHIFT;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
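/*
 * Worked example (illustrative numbers only): with a 64-bit MDWIDTH and a
 * RAM2_DEPTH of 1024, the RxFIFO is 1024 * 64 / 8 - 24 - 16 = 8152 bytes,
 * so NUMP = 8152 / 1024 = 7 (integer division) and 7 is written to
 * DCFG.NUMP. The min_t() above caps the result at 16 for very large FIFOs.
 */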
state on Run/Stop 2018 * bit if we try to force the IP to USB2-only mode. 2019 * 2020 * Because of that, we cannot configure the IP to any 2021 * speed other than the SuperSpeed 2022 * 2023 * Refers to: 2024 * 2025 * STAR#9000525659: Clock Domain Crossing on DCTL in 2026 * USB 2.0 Mode 2027 */ 2028 if (dwc->revision < DWC3_REVISION_220A) { 2029 reg |= DWC3_DCFG_SUPERSPEED; 2030 } else { 2031 switch (speed) { 2032 case USB_SPEED_LOW: 2033 reg |= DWC3_DCFG_LOWSPEED; 2034 break; 2035 case USB_SPEED_FULL: 2036 reg |= DWC3_DCFG_FULLSPEED; 2037 break; 2038 case USB_SPEED_HIGH: 2039 reg |= DWC3_DCFG_HIGHSPEED; 2040 break; 2041 case USB_SPEED_SUPER: 2042 reg |= DWC3_DCFG_SUPERSPEED; 2043 break; 2044 case USB_SPEED_SUPER_PLUS: 2045 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2046 break; 2047 default: 2048 dev_err(dwc->dev, "invalid speed (%d)\n", speed); 2049 2050 if (dwc->revision & DWC3_REVISION_IS_DWC31) 2051 reg |= DWC3_DCFG_SUPERSPEED_PLUS; 2052 else 2053 reg |= DWC3_DCFG_SUPERSPEED; 2054 } 2055 } 2056 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2057 2058 spin_unlock_irqrestore(&dwc->lock, flags); 2059 } 2060 2061 static const struct usb_gadget_ops dwc3_gadget_ops = { 2062 .get_frame = dwc3_gadget_get_frame, 2063 .wakeup = dwc3_gadget_wakeup, 2064 .set_selfpowered = dwc3_gadget_set_selfpowered, 2065 .pullup = dwc3_gadget_pullup, 2066 .udc_start = dwc3_gadget_start, 2067 .udc_stop = dwc3_gadget_stop, 2068 .udc_set_speed = dwc3_gadget_set_speed, 2069 }; 2070 2071 /* -------------------------------------------------------------------------- */ 2072 2073 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total) 2074 { 2075 struct dwc3_ep *dep; 2076 u8 epnum; 2077 2078 INIT_LIST_HEAD(&dwc->gadget.ep_list); 2079 2080 for (epnum = 0; epnum < total; epnum++) { 2081 bool direction = epnum & 1; 2082 u8 num = epnum >> 1; 2083 2084 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 2085 if (!dep) 2086 return -ENOMEM; 2087 2088 dep->dwc = dwc; 2089 dep->number = epnum; 2090 dep->direction = direction; 2091 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum); 2092 dwc->eps[epnum] = dep; 2093 2094 snprintf(dep->name, sizeof(dep->name), "ep%u%s", num, 2095 direction ? "in" : "out"); 2096 2097 dep->endpoint.name = dep->name; 2098 2099 if (!(dep->number > 1)) { 2100 dep->endpoint.desc = &dwc3_gadget_ep0_desc; 2101 dep->endpoint.comp_desc = NULL; 2102 } 2103 2104 spin_lock_init(&dep->lock); 2105 2106 if (num == 0) { 2107 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 2108 dep->endpoint.maxburst = 1; 2109 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 2110 if (!direction) 2111 dwc->gadget.ep0 = &dep->endpoint; 2112 } else if (direction) { 2113 int mdwidth; 2114 int kbytes; 2115 int size; 2116 int ret; 2117 2118 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2119 /* MDWIDTH is represented in bits, we need it in bytes */ 2120 mdwidth /= 8; 2121 2122 size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num)); 2123 size = DWC3_GTXFIFOSIZ_TXFDEF(size); 2124 2125 /* FIFO Depth is in MDWDITH bytes. Multiply */ 2126 size *= mdwidth; 2127 2128 kbytes = size / 1024; 2129 if (kbytes == 0) 2130 kbytes = 1; 2131 2132 /* 2133 * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for 2134 * internal overhead. We don't really know how these are used, 2135 * but documentation say it exists. 
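 *
 * (A worked example with illustrative values: MDWIDTH = 64 bits, i.e. 8
 * bytes, and GTXFIFOSIZ.TXFDEF = 259 give size = 259 * 8 = 2072 bytes and
 * kbytes = 2, so the usable payload below becomes
 * (2072 - 8 * (2 + 1)) / 2 = 1024 bytes, which is what
 * usb_ep_set_maxpacket_limit() ends up receiving.)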
2136 */ 2137 size -= mdwidth * (kbytes + 1); 2138 size /= kbytes; 2139 2140 usb_ep_set_maxpacket_limit(&dep->endpoint, size); 2141 2142 dep->endpoint.max_streams = 15; 2143 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2144 list_add_tail(&dep->endpoint.ep_list, 2145 &dwc->gadget.ep_list); 2146 2147 ret = dwc3_alloc_trb_pool(dep); 2148 if (ret) 2149 return ret; 2150 } else { 2151 int ret; 2152 2153 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 2154 dep->endpoint.max_streams = 15; 2155 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2156 list_add_tail(&dep->endpoint.ep_list, 2157 &dwc->gadget.ep_list); 2158 2159 ret = dwc3_alloc_trb_pool(dep); 2160 if (ret) 2161 return ret; 2162 } 2163 2164 if (num == 0) { 2165 dep->endpoint.caps.type_control = true; 2166 } else { 2167 dep->endpoint.caps.type_iso = true; 2168 dep->endpoint.caps.type_bulk = true; 2169 dep->endpoint.caps.type_int = true; 2170 } 2171 2172 dep->endpoint.caps.dir_in = direction; 2173 dep->endpoint.caps.dir_out = !direction; 2174 2175 INIT_LIST_HEAD(&dep->pending_list); 2176 INIT_LIST_HEAD(&dep->started_list); 2177 } 2178 2179 return 0; 2180 } 2181 2182 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 2183 { 2184 struct dwc3_ep *dep; 2185 u8 epnum; 2186 2187 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2188 dep = dwc->eps[epnum]; 2189 if (!dep) 2190 continue; 2191 /* 2192 * Physical endpoints 0 and 1 are special; they form the 2193 * bi-directional USB endpoint 0. 2194 * 2195 * For those two physical endpoints, we don't allocate a TRB 2196 * pool nor do we add them the endpoints list. Due to that, we 2197 * shouldn't do these two operations otherwise we would end up 2198 * with all sorts of bugs when removing dwc3.ko. 2199 */ 2200 if (epnum != 0 && epnum != 1) { 2201 dwc3_free_trb_pool(dep); 2202 list_del(&dep->endpoint.ep_list); 2203 } 2204 2205 kfree(dep); 2206 } 2207 } 2208 2209 /* -------------------------------------------------------------------------- */ 2210 2211 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 2212 struct dwc3_request *req, struct dwc3_trb *trb, 2213 const struct dwc3_event_depevt *event, int status, 2214 int chain) 2215 { 2216 unsigned int count; 2217 unsigned int s_pkt = 0; 2218 unsigned int trb_status; 2219 2220 dwc3_ep_inc_deq(dep); 2221 2222 if (req->trb == trb) 2223 dep->queued_requests--; 2224 2225 trace_dwc3_complete_trb(dep, trb); 2226 2227 /* 2228 * If we're in the middle of series of chained TRBs and we 2229 * receive a short transfer along the way, DWC3 will skip 2230 * through all TRBs including the last TRB in the chain (the 2231 * where CHN bit is zero. DWC3 will also avoid clearing HWO 2232 * bit and SW has to do it manually. 2233 * 2234 * We're going to do that here to avoid problems of HW trying 2235 * to use bogus TRBs for transfers. 2236 */ 2237 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) 2238 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2239 2240 /* 2241 * If we're dealing with unaligned size OUT transfer, we will be left 2242 * with one TRB pending in the ring. We need to manually clear HWO bit 2243 * from that TRB. 
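 *
 * (Elsewhere in this file, the TRB-preparation path appends an extra TRB,
 * backed by the dwc->bounce buffer allocated in dwc3_gadget_init(), for OUT
 * requests whose length is not a multiple of wMaxPacketSize or that have
 * req->zero set, to round the transfer up to a maxpacket boundary. That
 * appended TRB is the one that can be left with HWO still set and is what
 * gets cleared just below.)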
2244 */ 2245 if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { 2246 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2247 return 1; 2248 } 2249 2250 count = trb->size & DWC3_TRB_SIZE_MASK; 2251 req->remaining += count; 2252 2253 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2254 return 1; 2255 2256 if (dep->direction) { 2257 if (count) { 2258 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 2259 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 2260 /* 2261 * If missed isoc occurred and there is 2262 * no request queued then issue END 2263 * TRANSFER, so that core generates 2264 * next xfernotready and we will issue 2265 * a fresh START TRANSFER. 2266 * If there are still queued request 2267 * then wait, do not issue either END 2268 * or UPDATE TRANSFER, just attach next 2269 * request in pending_list during 2270 * giveback.If any future queued request 2271 * is successfully transferred then we 2272 * will issue UPDATE TRANSFER for all 2273 * request in the pending_list. 2274 */ 2275 dep->flags |= DWC3_EP_MISSED_ISOC; 2276 } else { 2277 dev_err(dwc->dev, "incomplete IN transfer %s\n", 2278 dep->name); 2279 status = -ECONNRESET; 2280 } 2281 } else { 2282 dep->flags &= ~DWC3_EP_MISSED_ISOC; 2283 } 2284 } else { 2285 if (count && (event->status & DEPEVT_STATUS_SHORT)) 2286 s_pkt = 1; 2287 } 2288 2289 if (s_pkt && !chain) 2290 return 1; 2291 2292 if ((event->status & DEPEVT_STATUS_IOC) && 2293 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2294 return 1; 2295 2296 return 0; 2297 } 2298 2299 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 2300 const struct dwc3_event_depevt *event, int status) 2301 { 2302 struct dwc3_request *req, *n; 2303 struct dwc3_trb *trb; 2304 bool ioc = false; 2305 int ret = 0; 2306 2307 list_for_each_entry_safe(req, n, &dep->started_list, list) { 2308 unsigned length; 2309 int chain; 2310 2311 length = req->request.length; 2312 chain = req->num_pending_sgs > 0; 2313 if (chain) { 2314 struct scatterlist *sg = req->sg; 2315 struct scatterlist *s; 2316 unsigned int pending = req->num_pending_sgs; 2317 unsigned int i; 2318 2319 for_each_sg(sg, s, pending, i) { 2320 trb = &dep->trb_pool[dep->trb_dequeue]; 2321 2322 if (trb->ctrl & DWC3_TRB_CTRL_HWO) 2323 break; 2324 2325 req->sg = sg_next(s); 2326 req->num_pending_sgs--; 2327 2328 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2329 event, status, chain); 2330 if (ret) 2331 break; 2332 } 2333 } else { 2334 trb = &dep->trb_pool[dep->trb_dequeue]; 2335 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2336 event, status, chain); 2337 } 2338 2339 if (req->unaligned || req->zero) { 2340 trb = &dep->trb_pool[dep->trb_dequeue]; 2341 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2342 event, status, false); 2343 req->unaligned = false; 2344 req->zero = false; 2345 } 2346 2347 req->request.actual = length - req->remaining; 2348 2349 if ((req->request.actual < length) && req->num_pending_sgs) 2350 return __dwc3_gadget_kick_transfer(dep, 0); 2351 2352 dwc3_gadget_giveback(dep, req, status); 2353 2354 if (ret) { 2355 if ((event->status & DEPEVT_STATUS_IOC) && 2356 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 2357 ioc = true; 2358 break; 2359 } 2360 } 2361 2362 /* 2363 * Our endpoint might get disabled by another thread during 2364 * dwc3_gadget_giveback(). 
If that happens, we're just gonna return 1 2365 * early on so DWC3_EP_BUSY flag gets cleared 2366 */ 2367 if (!dep->endpoint.desc) 2368 return 1; 2369 2370 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 2371 list_empty(&dep->started_list)) { 2372 if (list_empty(&dep->pending_list)) { 2373 /* 2374 * If there is no entry in request list then do 2375 * not issue END TRANSFER now. Just set PENDING 2376 * flag, so that END TRANSFER is issued when an 2377 * entry is added into request list. 2378 */ 2379 dep->flags = DWC3_EP_PENDING_REQUEST; 2380 } else { 2381 dwc3_stop_active_transfer(dwc, dep->number, true); 2382 dep->flags = DWC3_EP_ENABLED; 2383 } 2384 return 1; 2385 } 2386 2387 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc) 2388 return 0; 2389 2390 return 1; 2391 } 2392 2393 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 2394 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 2395 { 2396 unsigned status = 0; 2397 int clean_busy; 2398 u32 is_xfer_complete; 2399 2400 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 2401 2402 if (event->status & DEPEVT_STATUS_BUSERR) 2403 status = -ECONNRESET; 2404 2405 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 2406 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete || 2407 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 2408 dep->flags &= ~DWC3_EP_BUSY; 2409 2410 /* 2411 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2412 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 2413 */ 2414 if (dwc->revision < DWC3_REVISION_183A) { 2415 u32 reg; 2416 int i; 2417 2418 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2419 dep = dwc->eps[i]; 2420 2421 if (!(dep->flags & DWC3_EP_ENABLED)) 2422 continue; 2423 2424 if (!list_empty(&dep->started_list)) 2425 return; 2426 } 2427 2428 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2429 reg |= dwc->u1u2; 2430 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2431 2432 dwc->u1u2 = 0; 2433 } 2434 2435 /* 2436 * Our endpoint might get disabled by another thread during 2437 * dwc3_gadget_giveback(). 
If that happens, we're just gonna return 1 2438 * early on so DWC3_EP_BUSY flag gets cleared 2439 */ 2440 if (!dep->endpoint.desc) 2441 return; 2442 2443 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2444 int ret; 2445 2446 ret = __dwc3_gadget_kick_transfer(dep, 0); 2447 if (!ret || ret == -EBUSY) 2448 return; 2449 } 2450 } 2451 2452 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2453 const struct dwc3_event_depevt *event) 2454 { 2455 struct dwc3_ep *dep; 2456 u8 epnum = event->endpoint_number; 2457 u8 cmd; 2458 2459 dep = dwc->eps[epnum]; 2460 2461 if (!(dep->flags & DWC3_EP_ENABLED)) { 2462 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 2463 return; 2464 2465 /* Handle only EPCMDCMPLT when EP disabled */ 2466 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) 2467 return; 2468 } 2469 2470 if (epnum == 0 || epnum == 1) { 2471 dwc3_ep0_interrupt(dwc, event); 2472 return; 2473 } 2474 2475 switch (event->endpoint_event) { 2476 case DWC3_DEPEVT_XFERCOMPLETE: 2477 dep->resource_index = 0; 2478 2479 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2480 dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); 2481 return; 2482 } 2483 2484 dwc3_endpoint_transfer_complete(dwc, dep, event); 2485 break; 2486 case DWC3_DEPEVT_XFERINPROGRESS: 2487 dwc3_endpoint_transfer_complete(dwc, dep, event); 2488 break; 2489 case DWC3_DEPEVT_XFERNOTREADY: 2490 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2491 dwc3_gadget_start_isoc(dwc, dep, event); 2492 } else { 2493 int ret; 2494 2495 ret = __dwc3_gadget_kick_transfer(dep, 0); 2496 if (!ret || ret == -EBUSY) 2497 return; 2498 } 2499 2500 break; 2501 case DWC3_DEPEVT_STREAMEVT: 2502 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2503 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2504 dep->name); 2505 return; 2506 } 2507 break; 2508 case DWC3_DEPEVT_EPCMDCMPLT: 2509 cmd = DEPEVT_PARAMETER_CMD(event->parameters); 2510 2511 if (cmd == DWC3_DEPCMD_ENDTRANSFER) { 2512 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; 2513 wake_up(&dep->wait_end_transfer); 2514 } 2515 break; 2516 case DWC3_DEPEVT_RXTXFIFOEVT: 2517 break; 2518 } 2519 } 2520 2521 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2522 { 2523 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2524 spin_unlock(&dwc->lock); 2525 dwc->gadget_driver->disconnect(&dwc->gadget); 2526 spin_lock(&dwc->lock); 2527 } 2528 } 2529 2530 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2531 { 2532 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2533 spin_unlock(&dwc->lock); 2534 dwc->gadget_driver->suspend(&dwc->gadget); 2535 spin_lock(&dwc->lock); 2536 } 2537 } 2538 2539 static void dwc3_resume_gadget(struct dwc3 *dwc) 2540 { 2541 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2542 spin_unlock(&dwc->lock); 2543 dwc->gadget_driver->resume(&dwc->gadget); 2544 spin_lock(&dwc->lock); 2545 } 2546 } 2547 2548 static void dwc3_reset_gadget(struct dwc3 *dwc) 2549 { 2550 if (!dwc->gadget_driver) 2551 return; 2552 2553 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2554 spin_unlock(&dwc->lock); 2555 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2556 spin_lock(&dwc->lock); 2557 } 2558 } 2559 2560 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2561 { 2562 struct dwc3_ep *dep; 2563 struct dwc3_gadget_ep_cmd_params params; 2564 u32 cmd; 2565 int ret; 2566 2567 dep = dwc->eps[epnum]; 2568 2569 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || 2570 !dep->resource_index) 2571 return; 2572 2573 /* 2574 * NOTICE: We are violating 
what the Databook says about the 2575 * EndTransfer command. Ideally we would _always_ wait for the 2576 * EndTransfer Command Completion IRQ, but that's causing too 2577 * much trouble synchronizing between us and gadget driver. 2578 * 2579 * We have discussed this with the IP Provider and it was 2580 * suggested to give back all requests here, but give HW some 2581 * extra time to synchronize with the interconnect. We're using 2582 * an arbitrary 100us delay for that. 2583 * 2584 * Note also that a similar handling was tested by Synopsys 2585 * (thanks a lot Paul) and nothing bad has come out of it. 2586 * In short, what we're doing is: 2587 * 2588 * - Issue EndTransfer WITH CMDIOC bit set 2589 * - Wait 100us 2590 * 2591 * As of IP version 3.10a of the DWC_usb3 IP, the controller 2592 * supports a mode to work around the above limitation. The 2593 * software can poll the CMDACT bit in the DEPCMD register 2594 * after issuing an EndTransfer command. This mode is enabled 2595 * by writing GUCTL2[14]. This polling is already done in the 2596 * dwc3_send_gadget_ep_cmd() function so if the mode is 2597 * enabled, the EndTransfer command will have completed upon 2598 * returning from this function and we don't need to delay for 2599 * 100us. 2600 * 2601 * This mode is NOT available on the DWC_usb31 IP. 2602 */ 2603 2604 cmd = DWC3_DEPCMD_ENDTRANSFER; 2605 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; 2606 cmd |= DWC3_DEPCMD_CMDIOC; 2607 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2608 memset(&params, 0, sizeof(params)); 2609 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); 2610 WARN_ON_ONCE(ret); 2611 dep->resource_index = 0; 2612 dep->flags &= ~DWC3_EP_BUSY; 2613 2614 if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { 2615 dep->flags |= DWC3_EP_END_TRANSFER_PENDING; 2616 udelay(100); 2617 } 2618 } 2619 2620 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2621 { 2622 u32 epnum; 2623 2624 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2625 struct dwc3_ep *dep; 2626 int ret; 2627 2628 dep = dwc->eps[epnum]; 2629 if (!dep) 2630 continue; 2631 2632 if (!(dep->flags & DWC3_EP_STALL)) 2633 continue; 2634 2635 dep->flags &= ~DWC3_EP_STALL; 2636 2637 ret = dwc3_send_clear_stall_ep_cmd(dep); 2638 WARN_ON_ONCE(ret); 2639 } 2640 } 2641 2642 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2643 { 2644 int reg; 2645 2646 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2647 reg &= ~DWC3_DCTL_INITU1ENA; 2648 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2649 2650 reg &= ~DWC3_DCTL_INITU2ENA; 2651 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2652 2653 dwc3_disconnect_gadget(dwc); 2654 2655 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2656 dwc->setup_packet_pending = false; 2657 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2658 2659 dwc->connected = false; 2660 } 2661 2662 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2663 { 2664 u32 reg; 2665 2666 dwc->connected = true; 2667 2668 /* 2669 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2670 * would cause a missing Disconnect Event if there's a 2671 * pending Setup Packet in the FIFO. 2672 * 2673 * There's no suggested workaround on the official Bug 2674 * report, which states that "unless the driver/application 2675 * is doing any special handling of a disconnect event, 2676 * there is no functional issue". 2677 * 2678 * Unfortunately, it turns out that we _do_ some special 2679 * handling of a disconnect event, namely complete all 2680 * pending transfers, notify gadget driver of the 2681 * disconnection, and so on.
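 *
 * (That special handling lives in dwc3_gadget_disconnect_interrupt() just
 * above: it clears the U1/U2 initiation bits in DCTL, calls
 * dwc3_disconnect_gadget(), resets the gadget speed and state to
 * USB_STATE_NOTATTACHED and drops dwc->connected.)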
2682 * 2683 * Our suggested workaround is to follow the Disconnect 2684 * Event steps here, instead, based on a setup_packet_pending 2685 * flag. Such flag gets set whenever we have a SETUP_PENDING 2686 * status for EP0 TRBs and gets cleared on XferComplete for the 2687 * same endpoint. 2688 * 2689 * Refers to: 2690 * 2691 * STAR#9000466709: RTL: Device : Disconnect event not 2692 * generated if setup packet pending in FIFO 2693 */ 2694 if (dwc->revision < DWC3_REVISION_188A) { 2695 if (dwc->setup_packet_pending) 2696 dwc3_gadget_disconnect_interrupt(dwc); 2697 } 2698 2699 dwc3_reset_gadget(dwc); 2700 2701 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2702 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2703 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2704 dwc->test_mode = false; 2705 dwc3_clear_stall_all_ep(dwc); 2706 2707 /* Reset device address to zero */ 2708 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2709 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2710 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2711 } 2712 2713 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2714 { 2715 struct dwc3_ep *dep; 2716 int ret; 2717 u32 reg; 2718 u8 speed; 2719 2720 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2721 speed = reg & DWC3_DSTS_CONNECTSPD; 2722 dwc->speed = speed; 2723 2724 /* 2725 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2726 * each time on Connect Done. 2727 * 2728 * Currently we always use the reset value. If any platform 2729 * wants to set this to a different value, we need to add a 2730 * setting and update GCTL.RAMCLKSEL here. 2731 */ 2732 2733 switch (speed) { 2734 case DWC3_DSTS_SUPERSPEED_PLUS: 2735 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2736 dwc->gadget.ep0->maxpacket = 512; 2737 dwc->gadget.speed = USB_SPEED_SUPER_PLUS; 2738 break; 2739 case DWC3_DSTS_SUPERSPEED: 2740 /* 2741 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2742 * would cause a missing USB3 Reset event. 2743 * 2744 * In such situations, we should force a USB3 Reset 2745 * event by calling our dwc3_gadget_reset_interrupt() 2746 * routine. 
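 *
 * (dwc3_gadget_reset_interrupt() is the same handler that the
 * DWC3_DEVICE_EVENT_RESET case in dwc3_gadget_interrupt() runs, so forcing
 * it here simply replays the normal reset sequence the <1.90a core failed
 * to signal.)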
2747 * 2748 * Refers to: 2749 * 2750 * STAR#9000483510: RTL: SS : USB3 reset event may 2751 * not be generated always when the link enters poll 2752 */ 2753 if (dwc->revision < DWC3_REVISION_190A) 2754 dwc3_gadget_reset_interrupt(dwc); 2755 2756 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2757 dwc->gadget.ep0->maxpacket = 512; 2758 dwc->gadget.speed = USB_SPEED_SUPER; 2759 break; 2760 case DWC3_DSTS_HIGHSPEED: 2761 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2762 dwc->gadget.ep0->maxpacket = 64; 2763 dwc->gadget.speed = USB_SPEED_HIGH; 2764 break; 2765 case DWC3_DSTS_FULLSPEED: 2766 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2767 dwc->gadget.ep0->maxpacket = 64; 2768 dwc->gadget.speed = USB_SPEED_FULL; 2769 break; 2770 case DWC3_DSTS_LOWSPEED: 2771 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2772 dwc->gadget.ep0->maxpacket = 8; 2773 dwc->gadget.speed = USB_SPEED_LOW; 2774 break; 2775 } 2776 2777 /* Enable USB2 LPM Capability */ 2778 2779 if ((dwc->revision > DWC3_REVISION_194A) && 2780 (speed != DWC3_DSTS_SUPERSPEED) && 2781 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) { 2782 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2783 reg |= DWC3_DCFG_LPM_CAP; 2784 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2785 2786 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2787 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2788 2789 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2790 2791 /* 2792 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2793 * DCFG.LPMCap is set, core responses with an ACK and the 2794 * BESL value in the LPM token is less than or equal to LPM 2795 * NYET threshold. 2796 */ 2797 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2798 && dwc->has_lpm_erratum, 2799 "LPM Erratum not available on dwc3 revisions < 2.40a\n"); 2800 2801 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2802 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2803 2804 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2805 } else { 2806 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2807 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2808 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2809 } 2810 2811 dep = dwc->eps[0]; 2812 ret = __dwc3_gadget_ep_enable(dep, true, false); 2813 if (ret) { 2814 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2815 return; 2816 } 2817 2818 dep = dwc->eps[1]; 2819 ret = __dwc3_gadget_ep_enable(dep, true, false); 2820 if (ret) { 2821 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2822 return; 2823 } 2824 2825 /* 2826 * Configure PHY via GUSB3PIPECTLn if required. 2827 * 2828 * Update GTXFIFOSIZn 2829 * 2830 * In both cases reset values should be sufficient. 2831 */ 2832 } 2833 2834 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2835 { 2836 /* 2837 * TODO take core out of low power mode when that's 2838 * implemented. 2839 */ 2840 2841 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2842 spin_unlock(&dwc->lock); 2843 dwc->gadget_driver->resume(&dwc->gadget); 2844 spin_lock(&dwc->lock); 2845 } 2846 } 2847 2848 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2849 unsigned int evtinfo) 2850 { 2851 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2852 unsigned int pwropt; 2853 2854 /* 2855 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2856 * Hibernation mode enabled which would show up when device detects 2857 * host-initiated U3 exit. 
2858 * 2859 * In that case, device will generate a Link State Change Interrupt 2860 * from U3 to RESUME which is only necessary if Hibernation is 2861 * configured in. 2862 * 2863 * There are no functional changes due to such spurious event and we 2864 * just need to ignore it. 2865 * 2866 * Refers to: 2867 * 2868 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation 2869 * operational mode 2870 */ 2871 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); 2872 if ((dwc->revision < DWC3_REVISION_250A) && 2873 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { 2874 if ((dwc->link_state == DWC3_LINK_STATE_U3) && 2875 (next == DWC3_LINK_STATE_RESUME)) { 2876 return; 2877 } 2878 } 2879 2880 /* 2881 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending 2882 * on the link partner, the USB session might do multiple entry/exit 2883 * of low power states before a transfer takes place. 2884 * 2885 * Due to this problem, we might experience lower throughput. The 2886 * suggested workaround is to disable DCTL[12:9] bits if we're 2887 * transitioning from U1/U2 to U0 and enable those bits again 2888 * after a transfer completes and there are no pending transfers 2889 * on any of the enabled endpoints. 2890 * 2891 * This is the first half of that workaround. 2892 * 2893 * Refers to: 2894 * 2895 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us 2896 * core send LGO_Ux entering U0 2897 */ 2898 if (dwc->revision < DWC3_REVISION_183A) { 2899 if (next == DWC3_LINK_STATE_U0) { 2900 u32 u1u2; 2901 u32 reg; 2902 2903 switch (dwc->link_state) { 2904 case DWC3_LINK_STATE_U1: 2905 case DWC3_LINK_STATE_U2: 2906 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2907 u1u2 = reg & (DWC3_DCTL_INITU2ENA 2908 | DWC3_DCTL_ACCEPTU2ENA 2909 | DWC3_DCTL_INITU1ENA 2910 | DWC3_DCTL_ACCEPTU1ENA); 2911 2912 if (!dwc->u1u2) 2913 dwc->u1u2 = reg & u1u2; 2914 2915 reg &= ~u1u2; 2916 2917 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2918 break; 2919 default: 2920 /* do nothing */ 2921 break; 2922 } 2923 } 2924 } 2925 2926 switch (next) { 2927 case DWC3_LINK_STATE_U1: 2928 if (dwc->speed == USB_SPEED_SUPER) 2929 dwc3_suspend_gadget(dwc); 2930 break; 2931 case DWC3_LINK_STATE_U2: 2932 case DWC3_LINK_STATE_U3: 2933 dwc3_suspend_gadget(dwc); 2934 break; 2935 case DWC3_LINK_STATE_RESUME: 2936 dwc3_resume_gadget(dwc); 2937 break; 2938 default: 2939 /* do nothing */ 2940 break; 2941 } 2942 2943 dwc->link_state = next; 2944 } 2945 2946 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, 2947 unsigned int evtinfo) 2948 { 2949 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2950 2951 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) 2952 dwc3_suspend_gadget(dwc); 2953 2954 dwc->link_state = next; 2955 } 2956 2957 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, 2958 unsigned int evtinfo) 2959 { 2960 unsigned int is_ss = evtinfo & BIT(4); 2961 2962 /* 2963 * WORKAROUND: DWC3 revison 2.20a with hibernation support 2964 * have a known issue which can cause USB CV TD.9.23 to fail 2965 * randomly. 2966 * 2967 * Because of this issue, core could generate bogus hibernation 2968 * events which SW needs to ignore. 
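 *
 * (The is_ss check below is meant to drop exactly those bogus events: it
 * compares the event's SuperSpeed flag, BIT(4) of event_info, against the
 * connection speed we recorded on Connect Done and bails out when the two
 * disagree.)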
2969 * 2970 * Refers to: 2971 * 2972 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2973 * Device Fallback from SuperSpeed 2974 */ 2975 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2976 return; 2977 2978 /* enter hibernation here */ 2979 } 2980 2981 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2982 const struct dwc3_event_devt *event) 2983 { 2984 switch (event->type) { 2985 case DWC3_DEVICE_EVENT_DISCONNECT: 2986 dwc3_gadget_disconnect_interrupt(dwc); 2987 break; 2988 case DWC3_DEVICE_EVENT_RESET: 2989 dwc3_gadget_reset_interrupt(dwc); 2990 break; 2991 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2992 dwc3_gadget_conndone_interrupt(dwc); 2993 break; 2994 case DWC3_DEVICE_EVENT_WAKEUP: 2995 dwc3_gadget_wakeup_interrupt(dwc); 2996 break; 2997 case DWC3_DEVICE_EVENT_HIBER_REQ: 2998 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 2999 "unexpected hibernation event\n")) 3000 break; 3001 3002 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 3003 break; 3004 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 3005 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 3006 break; 3007 case DWC3_DEVICE_EVENT_EOPF: 3008 /* It changed to be suspend event for version 2.30a and above */ 3009 if (dwc->revision >= DWC3_REVISION_230A) { 3010 /* 3011 * Ignore suspend event until the gadget enters into 3012 * USB_STATE_CONFIGURED state. 3013 */ 3014 if (dwc->gadget.state >= USB_STATE_CONFIGURED) 3015 dwc3_gadget_suspend_interrupt(dwc, 3016 event->event_info); 3017 } 3018 break; 3019 case DWC3_DEVICE_EVENT_SOF: 3020 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 3021 case DWC3_DEVICE_EVENT_CMD_CMPL: 3022 case DWC3_DEVICE_EVENT_OVERFLOW: 3023 break; 3024 default: 3025 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 3026 } 3027 } 3028 3029 static void dwc3_process_event_entry(struct dwc3 *dwc, 3030 const union dwc3_event *event) 3031 { 3032 trace_dwc3_event(event->raw, dwc); 3033 3034 if (!event->type.is_devspec) 3035 dwc3_endpoint_interrupt(dwc, &event->depevt); 3036 else if (event->type.type == DWC3_EVENT_TYPE_DEV) 3037 dwc3_gadget_interrupt(dwc, &event->devt); 3038 else 3039 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 3040 } 3041 3042 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) 3043 { 3044 struct dwc3 *dwc = evt->dwc; 3045 irqreturn_t ret = IRQ_NONE; 3046 int left; 3047 u32 reg; 3048 3049 left = evt->count; 3050 3051 if (!(evt->flags & DWC3_EVENT_PENDING)) 3052 return IRQ_NONE; 3053 3054 while (left > 0) { 3055 union dwc3_event event; 3056 3057 event.raw = *(u32 *) (evt->cache + evt->lpos); 3058 3059 dwc3_process_event_entry(dwc, &event); 3060 3061 /* 3062 * FIXME we wrap around correctly to the next entry as 3063 * almost all entries are 4 bytes in size. There is one 3064 * entry which has 12 bytes which is a regular entry 3065 * followed by 8 bytes data. ATM I don't know how 3066 * things are organized if we get next to the a 3067 * boundary so I worry about that once we try to handle 3068 * that. 
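 *
 * (Concretely, assuming the usual 4096-byte event buffer, evt->lpos
 * advances by 4 for each entry and an lpos of 4092 wraps back to 0 through
 * the modulo below; the open question above only matters for a hypothetical
 * 12-byte entry straddling that wrap point.)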
3069 */ 3070 evt->lpos = (evt->lpos + 4) % evt->length; 3071 left -= 4; 3072 } 3073 3074 evt->count = 0; 3075 evt->flags &= ~DWC3_EVENT_PENDING; 3076 ret = IRQ_HANDLED; 3077 3078 /* Unmask interrupt */ 3079 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3080 reg &= ~DWC3_GEVNTSIZ_INTMASK; 3081 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3082 3083 if (dwc->imod_interval) { 3084 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); 3085 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); 3086 } 3087 3088 return ret; 3089 } 3090 3091 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) 3092 { 3093 struct dwc3_event_buffer *evt = _evt; 3094 struct dwc3 *dwc = evt->dwc; 3095 unsigned long flags; 3096 irqreturn_t ret = IRQ_NONE; 3097 3098 spin_lock_irqsave(&dwc->lock, flags); 3099 ret = dwc3_process_event_buf(evt); 3100 spin_unlock_irqrestore(&dwc->lock, flags); 3101 3102 return ret; 3103 } 3104 3105 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) 3106 { 3107 struct dwc3 *dwc = evt->dwc; 3108 u32 amount; 3109 u32 count; 3110 u32 reg; 3111 3112 if (pm_runtime_suspended(dwc->dev)) { 3113 pm_runtime_get(dwc->dev); 3114 disable_irq_nosync(dwc->irq_gadget); 3115 dwc->pending_events = true; 3116 return IRQ_HANDLED; 3117 } 3118 3119 /* 3120 * With PCIe legacy interrupt, test shows that top-half irq handler can 3121 * be called again after HW interrupt deassertion. Check if bottom-half 3122 * irq event handler completes before caching new event to prevent 3123 * losing events. 3124 */ 3125 if (evt->flags & DWC3_EVENT_PENDING) 3126 return IRQ_HANDLED; 3127 3128 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3129 count &= DWC3_GEVNTCOUNT_MASK; 3130 if (!count) 3131 return IRQ_NONE; 3132 3133 evt->count = count; 3134 evt->flags |= DWC3_EVENT_PENDING; 3135 3136 /* Mask interrupt */ 3137 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); 3138 reg |= DWC3_GEVNTSIZ_INTMASK; 3139 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); 3140 3141 amount = min(count, evt->length - evt->lpos); 3142 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); 3143 3144 if (amount < count) 3145 memcpy(evt->cache, evt->buf, count - amount); 3146 3147 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); 3148 3149 return IRQ_WAKE_THREAD; 3150 } 3151 3152 static irqreturn_t dwc3_interrupt(int irq, void *_evt) 3153 { 3154 struct dwc3_event_buffer *evt = _evt; 3155 3156 return dwc3_check_event_buf(evt); 3157 } 3158 3159 static int dwc3_gadget_get_irq(struct dwc3 *dwc) 3160 { 3161 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 3162 int irq; 3163 3164 irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); 3165 if (irq > 0) 3166 goto out; 3167 3168 if (irq == -EPROBE_DEFER) 3169 goto out; 3170 3171 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); 3172 if (irq > 0) 3173 goto out; 3174 3175 if (irq == -EPROBE_DEFER) 3176 goto out; 3177 3178 irq = platform_get_irq(dwc3_pdev, 0); 3179 if (irq > 0) 3180 goto out; 3181 3182 if (irq != -EPROBE_DEFER) 3183 dev_err(dwc->dev, "missing peripheral IRQ\n"); 3184 3185 if (!irq) 3186 irq = -EINVAL; 3187 3188 out: 3189 return irq; 3190 } 3191 3192 /** 3193 * dwc3_gadget_init - initializes gadget related registers 3194 * @dwc: pointer to our controller context structure 3195 * 3196 * Returns 0 on success otherwise negative errno. 
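 *
 * On failure, everything allocated here is released again through the
 * err0..err4 unwind labels; dwc3_gadget_exit() later undoes the same
 * allocations (UDC, endpoints, bounce buffer, setup buffer, ep0 TRBs).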
3197 */ 3198 int dwc3_gadget_init(struct dwc3 *dwc) 3199 { 3200 int ret; 3201 int irq; 3202 3203 irq = dwc3_gadget_get_irq(dwc); 3204 if (irq < 0) { 3205 ret = irq; 3206 goto err0; 3207 } 3208 3209 dwc->irq_gadget = irq; 3210 3211 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, 3212 sizeof(*dwc->ep0_trb) * 2, 3213 &dwc->ep0_trb_addr, GFP_KERNEL); 3214 if (!dwc->ep0_trb) { 3215 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 3216 ret = -ENOMEM; 3217 goto err0; 3218 } 3219 3220 dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL); 3221 if (!dwc->setup_buf) { 3222 ret = -ENOMEM; 3223 goto err1; 3224 } 3225 3226 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3227 &dwc->bounce_addr, GFP_KERNEL); 3228 if (!dwc->bounce) { 3229 ret = -ENOMEM; 3230 goto err2; 3231 } 3232 3233 init_completion(&dwc->ep0_in_setup); 3234 3235 dwc->gadget.ops = &dwc3_gadget_ops; 3236 dwc->gadget.speed = USB_SPEED_UNKNOWN; 3237 dwc->gadget.sg_supported = true; 3238 dwc->gadget.name = "dwc3-gadget"; 3239 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; 3240 3241 /* 3242 * FIXME We might be setting max_speed to <SUPER, however versions 3243 * <2.20a of dwc3 have an issue with metastability (documented 3244 * elsewhere in this driver) which tells us we can't set max speed to 3245 * anything lower than SUPER. 3246 * 3247 * Because gadget.max_speed is only used by composite.c and function 3248 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 3249 * to happen so we avoid sending SuperSpeed Capability descriptor 3250 * together with our BOS descriptor as that could confuse host into 3251 * thinking we can handle super speed. 3252 * 3253 * Note that, in fact, we won't even support GetBOS requests when speed 3254 * is less than super speed because we don't have means, yet, to tell 3255 * composite.c that we are USB 2.0 + LPM ECN. 3256 */ 3257 if (dwc->revision < DWC3_REVISION_220A) 3258 dev_info(dwc->dev, "changing max_speed on rev %08x\n", 3259 dwc->revision); 3260 3261 dwc->gadget.max_speed = dwc->maximum_speed; 3262 3263 /* 3264 * REVISIT: Here we should clear all pending IRQs to be 3265 * sure we're starting from a well known location. 
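 *
 * A minimal sketch of that (an assumption, modelled on the acknowledge
 * sequence dwc3_process_event_buf() already uses) would be to read the
 * current event count and write it straight back, so GEVNTCOUNT is zero
 * before the gadget IRQ is requested:
 *
 *	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
 *	count &= DWC3_GEVNTCOUNT_MASK;
 *	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);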
3266 */ 3267 3268 ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps); 3269 if (ret) 3270 goto err3; 3271 3272 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3273 if (ret) { 3274 dev_err(dwc->dev, "failed to register udc\n"); 3275 goto err4; 3276 } 3277 3278 return 0; 3279 3280 err4: 3281 dwc3_gadget_free_endpoints(dwc); 3282 3283 err3: 3284 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3285 dwc->bounce_addr); 3286 3287 err2: 3288 kfree(dwc->setup_buf); 3289 3290 err1: 3291 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3292 dwc->ep0_trb, dwc->ep0_trb_addr); 3293 3294 err0: 3295 return ret; 3296 } 3297 3298 /* -------------------------------------------------------------------------- */ 3299 3300 void dwc3_gadget_exit(struct dwc3 *dwc) 3301 { 3302 usb_del_gadget_udc(&dwc->gadget); 3303 dwc3_gadget_free_endpoints(dwc); 3304 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3305 dwc->bounce_addr); 3306 kfree(dwc->setup_buf); 3307 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, 3308 dwc->ep0_trb, dwc->ep0_trb_addr); 3309 } 3310 3311 int dwc3_gadget_suspend(struct dwc3 *dwc) 3312 { 3313 if (!dwc->gadget_driver) 3314 return 0; 3315 3316 dwc3_gadget_run_stop(dwc, false, false); 3317 dwc3_disconnect_gadget(dwc); 3318 __dwc3_gadget_stop(dwc); 3319 3320 return 0; 3321 } 3322 3323 int dwc3_gadget_resume(struct dwc3 *dwc) 3324 { 3325 int ret; 3326 3327 if (!dwc->gadget_driver) 3328 return 0; 3329 3330 ret = __dwc3_gadget_start(dwc); 3331 if (ret < 0) 3332 goto err0; 3333 3334 ret = dwc3_gadget_run_stop(dwc, true, false); 3335 if (ret < 0) 3336 goto err1; 3337 3338 return 0; 3339 3340 err1: 3341 __dwc3_gadget_stop(dwc); 3342 3343 err0: 3344 return ret; 3345 } 3346 3347 void dwc3_gadget_process_pending_events(struct dwc3 *dwc) 3348 { 3349 if (dwc->pending_events) { 3350 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); 3351 dwc->pending_events = false; 3352 enable_irq(dwc->irq_gadget); 3353 } 3354 } 3355
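/*
 * Usage note (illustrative sketch, not part of this file): the suspend and
 * resume helpers above are expected to be driven by the core's PM code, with
 * any events that arrived while runtime-suspended replayed afterwards.
 * Roughly:
 *
 *	ret = dwc3_gadget_resume(dwc);
 *	if (!ret)
 *		dwc3_gadget_process_pending_events(dwc);
 *
 * dwc3_check_event_buf() is what sets dwc->pending_events and disables the
 * gadget IRQ when an interrupt fires during runtime suspend, so the replay
 * above re-runs dwc3_interrupt() against the cached event buffer and
 * re-enables that IRQ.
 */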