1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. 
This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
103 */ 104 if (dwc->revision >= DWC3_REVISION_194A) { 105 while (--retries) { 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 107 if (reg & DWC3_DSTS_DCNRD) 108 udelay(5); 109 else 110 break; 111 } 112 113 if (retries <= 0) 114 return -ETIMEDOUT; 115 } 116 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 119 120 /* set requested state */ 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state); 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 123 124 /* 125 * The following code is racy when called from dwc3_gadget_wakeup, 126 * and is not needed, at least on newer versions 127 */ 128 if (dwc->revision >= DWC3_REVISION_194A) 129 return 0; 130 131 /* wait for a change in DSTS */ 132 retries = 10000; 133 while (--retries) { 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 135 136 if (DWC3_DSTS_USBLNKST(reg) == state) 137 return 0; 138 139 udelay(5); 140 } 141 142 dwc3_trace(trace_dwc3_gadget, 143 "link state change request timed out"); 144 145 return -ETIMEDOUT; 146 } 147 148 /** 149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case 150 * @dwc: pointer to our context structure 151 * 152 * This function will a best effort FIFO allocation in order 153 * to improve FIFO usage and throughput, while still allowing 154 * us to enable as many endpoints as possible. 155 * 156 * Keep in mind that this operation will be highly dependent 157 * on the configured size for RAM1 - which contains TxFifo -, 158 * the amount of endpoints enabled on coreConsultant tool, and 159 * the width of the Master Bus. 160 * 161 * In the ideal world, we would always be able to satisfy the 162 * following equation: 163 * 164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \ 165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes 166 * 167 * Unfortunately, due to many variables that's not always the case. 
168 */ 169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) 170 { 171 int last_fifo_depth = 0; 172 int ram1_depth; 173 int fifo_size; 174 int mdwidth; 175 int num; 176 177 if (!dwc->needs_fifo_resize) 178 return 0; 179 180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); 181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 182 183 /* MDWIDTH is represented in bits, we need it in bytes */ 184 mdwidth >>= 3; 185 186 /* 187 * FIXME For now we will only allocate 1 wMaxPacketSize space 188 * for each enabled endpoint, later patches will come to 189 * improve this algorithm so that we better use the internal 190 * FIFO space 191 */ 192 for (num = 0; num < dwc->num_in_eps; num++) { 193 /* bit0 indicates direction; 1 means IN ep */ 194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; 195 int mult = 1; 196 int tmp; 197 198 if (!(dep->flags & DWC3_EP_ENABLED)) 199 continue; 200 201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) 202 || usb_endpoint_xfer_isoc(dep->endpoint.desc)) 203 mult = 3; 204 205 /* 206 * REVISIT: the following assumes we will always have enough 207 * space available on the FIFO RAM for all possible use cases. 208 * Make sure that's true somehow and change FIFO allocation 209 * accordingly. 210 * 211 * If we have Bulk or Isochronous endpoints, we want 212 * them to be able to be very, very fast. 
So we're giving 213 * those endpoints a fifo_size which is enough for 3 full 214 * packets 215 */ 216 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 217 tmp += mdwidth; 218 219 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 220 221 fifo_size |= (last_fifo_depth << 16); 222 223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d", 224 dep->name, last_fifo_depth, fifo_size & 0xffff); 225 226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 227 228 last_fifo_depth += (fifo_size & 0xffff); 229 } 230 231 return 0; 232 } 233 234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 235 int status) 236 { 237 struct dwc3 *dwc = dep->dwc; 238 int i; 239 240 if (req->queued) { 241 i = 0; 242 do { 243 dep->busy_slot++; 244 /* 245 * Skip LINK TRB. We can't use req->trb and check for 246 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 247 * just completed (not the LINK TRB). 248 */ 249 if (((dep->busy_slot & DWC3_TRB_MASK) == 250 DWC3_TRB_NUM- 1) && 251 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 252 dep->busy_slot++; 253 } while(++i < req->request.num_mapped_sgs); 254 req->queued = false; 255 } 256 list_del(&req->list); 257 req->trb = NULL; 258 259 if (req->request.status == -EINPROGRESS) 260 req->request.status = status; 261 262 if (dwc->ep0_bounced && dep->number == 0) 263 dwc->ep0_bounced = false; 264 else 265 usb_gadget_unmap_request(&dwc->gadget, &req->request, 266 req->direction); 267 268 trace_dwc3_gadget_giveback(req); 269 270 spin_unlock(&dwc->lock); 271 usb_gadget_giveback_request(&dep->endpoint, &req->request); 272 spin_lock(&dwc->lock); 273 } 274 275 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 276 { 277 u32 timeout = 500; 278 u32 reg; 279 280 trace_dwc3_gadget_generic_cmd(cmd, param); 281 282 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 283 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 284 285 do { 286 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 287 if (!(reg & DWC3_DGCMD_CMDACT)) { 
288 dwc3_trace(trace_dwc3_gadget, 289 "Command Complete --> %d", 290 DWC3_DGCMD_STATUS(reg)); 291 if (DWC3_DGCMD_STATUS(reg)) 292 return -EINVAL; 293 return 0; 294 } 295 296 /* 297 * We can't sleep here, because it's also called from 298 * interrupt context. 299 */ 300 timeout--; 301 if (!timeout) { 302 dwc3_trace(trace_dwc3_gadget, 303 "Command Timed Out"); 304 return -ETIMEDOUT; 305 } 306 udelay(1); 307 } while (1); 308 } 309 310 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 311 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 312 { 313 struct dwc3_ep *dep = dwc->eps[ep]; 314 u32 timeout = 500; 315 u32 reg; 316 317 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 318 319 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 320 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 321 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 322 323 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 324 do { 325 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 326 if (!(reg & DWC3_DEPCMD_CMDACT)) { 327 dwc3_trace(trace_dwc3_gadget, 328 "Command Complete --> %d", 329 DWC3_DEPCMD_STATUS(reg)); 330 if (DWC3_DEPCMD_STATUS(reg)) 331 return -EINVAL; 332 return 0; 333 } 334 335 /* 336 * We can't sleep here, because it is also called from 337 * interrupt context. 
338 */ 339 timeout--; 340 if (!timeout) { 341 dwc3_trace(trace_dwc3_gadget, 342 "Command Timed Out"); 343 return -ETIMEDOUT; 344 } 345 346 udelay(1); 347 } while (1); 348 } 349 350 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 351 struct dwc3_trb *trb) 352 { 353 u32 offset = (char *) trb - (char *) dep->trb_pool; 354 355 return dep->trb_pool_dma + offset; 356 } 357 358 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 359 { 360 struct dwc3 *dwc = dep->dwc; 361 362 if (dep->trb_pool) 363 return 0; 364 365 dep->trb_pool = dma_alloc_coherent(dwc->dev, 366 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 367 &dep->trb_pool_dma, GFP_KERNEL); 368 if (!dep->trb_pool) { 369 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 370 dep->name); 371 return -ENOMEM; 372 } 373 374 return 0; 375 } 376 377 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 378 { 379 struct dwc3 *dwc = dep->dwc; 380 381 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 382 dep->trb_pool, dep->trb_pool_dma); 383 384 dep->trb_pool = NULL; 385 dep->trb_pool_dma = 0; 386 } 387 388 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); 389 390 /** 391 * dwc3_gadget_start_config - Configure EP resources 392 * @dwc: pointer to our controller context structure 393 * @dep: endpoint that is being enabled 394 * 395 * The assignment of transfer resources cannot perfectly follow the 396 * data book due to the fact that the controller driver does not have 397 * all knowledge of the configuration in advance. It is given this 398 * information piecemeal by the composite gadget framework after every 399 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook 400 * programming model in this scenario can cause errors. For two 401 * reasons: 402 * 403 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION 404 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of 405 * multiple interfaces. 
406 * 407 * 2) The databook does not mention doing more DEPXFERCFG for new 408 * endpoint on alt setting (8.1.6). 409 * 410 * The following simplified method is used instead: 411 * 412 * All hardware endpoints can be assigned a transfer resource and this 413 * setting will stay persistent until either a core reset or 414 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and 415 * do DEPXFERCFG for every hardware endpoint as well. We are 416 * guaranteed that there are as many transfer resources as endpoints. 417 * 418 * This function is called for each endpoint when it is being enabled 419 * but is triggered only when called for EP0-out, which always happens 420 * first, and which should only happen in one of the above conditions. 421 */ 422 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 423 { 424 struct dwc3_gadget_ep_cmd_params params; 425 u32 cmd; 426 int i; 427 int ret; 428 429 if (dep->number) 430 return 0; 431 432 memset(¶ms, 0x00, sizeof(params)); 433 cmd = DWC3_DEPCMD_DEPSTARTCFG; 434 435 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 436 if (ret) 437 return ret; 438 439 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 440 struct dwc3_ep *dep = dwc->eps[i]; 441 442 if (!dep) 443 continue; 444 445 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 446 if (ret) 447 return ret; 448 } 449 450 return 0; 451 } 452 453 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 454 const struct usb_endpoint_descriptor *desc, 455 const struct usb_ss_ep_comp_descriptor *comp_desc, 456 bool ignore, bool restore) 457 { 458 struct dwc3_gadget_ep_cmd_params params; 459 460 memset(¶ms, 0x00, sizeof(params)); 461 462 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 463 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 464 465 /* Burst size is only needed in SuperSpeed mode */ 466 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 467 u32 burst = dep->endpoint.maxburst - 1; 468 469 params.param0 |= 
DWC3_DEPCFG_BURST_SIZE(burst); 470 } 471 472 if (ignore) 473 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 474 475 if (restore) { 476 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 477 params.param2 |= dep->saved_state; 478 } 479 480 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 481 | DWC3_DEPCFG_XFER_NOT_READY_EN; 482 483 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 484 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 485 | DWC3_DEPCFG_STREAM_EVENT_EN; 486 dep->stream_capable = true; 487 } 488 489 if (!usb_endpoint_xfer_control(desc)) 490 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 491 492 /* 493 * We are doing 1:1 mapping for endpoints, meaning 494 * Physical Endpoints 2 maps to Logical Endpoint 2 and 495 * so on. We consider the direction bit as part of the physical 496 * endpoint number. So USB endpoint 0x81 is 0x03. 497 */ 498 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 499 500 /* 501 * We must use the lower 16 TX FIFOs even though 502 * HW might have more 503 */ 504 if (dep->direction) 505 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 506 507 if (desc->bInterval) { 508 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 509 dep->interval = 1 << (desc->bInterval - 1); 510 } 511 512 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 513 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 514 } 515 516 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 517 { 518 struct dwc3_gadget_ep_cmd_params params; 519 520 memset(¶ms, 0x00, sizeof(params)); 521 522 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 523 524 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 525 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 526 } 527 528 /** 529 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 530 * @dep: endpoint to be initialized 531 * @desc: USB Endpoint Descriptor 532 * 533 * Caller should take care of locking 534 */ 535 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 536 const struct usb_endpoint_descriptor *desc, 
537 const struct usb_ss_ep_comp_descriptor *comp_desc, 538 bool ignore, bool restore) 539 { 540 struct dwc3 *dwc = dep->dwc; 541 u32 reg; 542 int ret; 543 544 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); 545 546 if (!(dep->flags & DWC3_EP_ENABLED)) { 547 ret = dwc3_gadget_start_config(dwc, dep); 548 if (ret) 549 return ret; 550 } 551 552 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 553 restore); 554 if (ret) 555 return ret; 556 557 if (!(dep->flags & DWC3_EP_ENABLED)) { 558 struct dwc3_trb *trb_st_hw; 559 struct dwc3_trb *trb_link; 560 561 dep->endpoint.desc = desc; 562 dep->comp_desc = comp_desc; 563 dep->type = usb_endpoint_type(desc); 564 dep->flags |= DWC3_EP_ENABLED; 565 566 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 567 reg |= DWC3_DALEPENA_EP(dep->number); 568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 569 570 if (!usb_endpoint_xfer_isoc(desc)) 571 goto out; 572 573 /* Link TRB for ISOC. The HWO bit is never reset */ 574 trb_st_hw = &dep->trb_pool[0]; 575 576 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 577 memset(trb_link, 0, sizeof(*trb_link)); 578 579 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 580 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 581 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB; 582 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 583 } 584 585 out: 586 switch (usb_endpoint_type(desc)) { 587 case USB_ENDPOINT_XFER_CONTROL: 588 /* don't change name */ 589 break; 590 case USB_ENDPOINT_XFER_ISOC: 591 strlcat(dep->name, "-isoc", sizeof(dep->name)); 592 break; 593 case USB_ENDPOINT_XFER_BULK: 594 strlcat(dep->name, "-bulk", sizeof(dep->name)); 595 break; 596 case USB_ENDPOINT_XFER_INT: 597 strlcat(dep->name, "-int", sizeof(dep->name)); 598 break; 599 default: 600 dev_err(dwc->dev, "invalid endpoint transfer type\n"); 601 } 602 603 return 0; 604 } 605 606 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force); 607 static void dwc3_remove_requests(struct dwc3 
*dwc, struct dwc3_ep *dep) 608 { 609 struct dwc3_request *req; 610 611 if (!list_empty(&dep->req_queued)) { 612 dwc3_stop_active_transfer(dwc, dep->number, true); 613 614 /* - giveback all requests to gadget driver */ 615 while (!list_empty(&dep->req_queued)) { 616 req = next_request(&dep->req_queued); 617 618 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 619 } 620 } 621 622 while (!list_empty(&dep->request_list)) { 623 req = next_request(&dep->request_list); 624 625 dwc3_gadget_giveback(dep, req, -ESHUTDOWN); 626 } 627 } 628 629 /** 630 * __dwc3_gadget_ep_disable - Disables a HW endpoint 631 * @dep: the endpoint to disable 632 * 633 * This function also removes requests which are currently processed ny the 634 * hardware and those which are not yet scheduled. 635 * Caller should take care of locking. 636 */ 637 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) 638 { 639 struct dwc3 *dwc = dep->dwc; 640 u32 reg; 641 642 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name); 643 644 dwc3_remove_requests(dwc, dep); 645 646 /* make sure HW endpoint isn't stalled */ 647 if (dep->flags & DWC3_EP_STALL) 648 __dwc3_gadget_ep_set_halt(dep, 0, false); 649 650 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 651 reg &= ~DWC3_DALEPENA_EP(dep->number); 652 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 653 654 dep->stream_capable = false; 655 dep->endpoint.desc = NULL; 656 dep->comp_desc = NULL; 657 dep->type = 0; 658 dep->flags = 0; 659 660 snprintf(dep->name, sizeof(dep->name), "ep%d%s", 661 dep->number >> 1, 662 (dep->number & 1) ? 
"in" : "out"); 663 664 return 0; 665 } 666 667 /* -------------------------------------------------------------------------- */ 668 669 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 670 const struct usb_endpoint_descriptor *desc) 671 { 672 return -EINVAL; 673 } 674 675 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 676 { 677 return -EINVAL; 678 } 679 680 /* -------------------------------------------------------------------------- */ 681 682 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 683 const struct usb_endpoint_descriptor *desc) 684 { 685 struct dwc3_ep *dep; 686 struct dwc3 *dwc; 687 unsigned long flags; 688 int ret; 689 690 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 691 pr_debug("dwc3: invalid parameters\n"); 692 return -EINVAL; 693 } 694 695 if (!desc->wMaxPacketSize) { 696 pr_debug("dwc3: missing wMaxPacketSize\n"); 697 return -EINVAL; 698 } 699 700 dep = to_dwc3_ep(ep); 701 dwc = dep->dwc; 702 703 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 704 "%s is already enabled\n", 705 dep->name)) 706 return 0; 707 708 spin_lock_irqsave(&dwc->lock, flags); 709 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 710 spin_unlock_irqrestore(&dwc->lock, flags); 711 712 return ret; 713 } 714 715 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 716 { 717 struct dwc3_ep *dep; 718 struct dwc3 *dwc; 719 unsigned long flags; 720 int ret; 721 722 if (!ep) { 723 pr_debug("dwc3: invalid parameters\n"); 724 return -EINVAL; 725 } 726 727 dep = to_dwc3_ep(ep); 728 dwc = dep->dwc; 729 730 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 731 "%s is already disabled\n", 732 dep->name)) 733 return 0; 734 735 spin_lock_irqsave(&dwc->lock, flags); 736 ret = __dwc3_gadget_ep_disable(dep); 737 spin_unlock_irqrestore(&dwc->lock, flags); 738 739 return ret; 740 } 741 742 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 743 gfp_t gfp_flags) 744 { 745 struct dwc3_request *req; 
746 struct dwc3_ep *dep = to_dwc3_ep(ep); 747 748 req = kzalloc(sizeof(*req), gfp_flags); 749 if (!req) 750 return NULL; 751 752 req->epnum = dep->number; 753 req->dep = dep; 754 755 trace_dwc3_alloc_request(req); 756 757 return &req->request; 758 } 759 760 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 761 struct usb_request *request) 762 { 763 struct dwc3_request *req = to_dwc3_request(request); 764 765 trace_dwc3_free_request(req); 766 kfree(req); 767 } 768 769 /** 770 * dwc3_prepare_one_trb - setup one TRB from one request 771 * @dep: endpoint for which this request is prepared 772 * @req: dwc3_request pointer 773 */ 774 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 775 struct dwc3_request *req, dma_addr_t dma, 776 unsigned length, unsigned last, unsigned chain, unsigned node) 777 { 778 struct dwc3_trb *trb; 779 780 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s", 781 dep->name, req, (unsigned long long) dma, 782 length, last ? " last" : "", 783 chain ? 
" chain" : ""); 784 785 786 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 787 788 if (!req->trb) { 789 dwc3_gadget_move_request_queued(req); 790 req->trb = trb; 791 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 792 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 793 } 794 795 dep->free_slot++; 796 /* Skip the LINK-TRB on ISOC */ 797 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 798 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 799 dep->free_slot++; 800 801 trb->size = DWC3_TRB_SIZE_LENGTH(length); 802 trb->bpl = lower_32_bits(dma); 803 trb->bph = upper_32_bits(dma); 804 805 switch (usb_endpoint_type(dep->endpoint.desc)) { 806 case USB_ENDPOINT_XFER_CONTROL: 807 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 808 break; 809 810 case USB_ENDPOINT_XFER_ISOC: 811 if (!node) 812 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 813 else 814 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 815 break; 816 817 case USB_ENDPOINT_XFER_BULK: 818 case USB_ENDPOINT_XFER_INT: 819 trb->ctrl = DWC3_TRBCTL_NORMAL; 820 break; 821 default: 822 /* 823 * This is only possible with faulty memory because we 824 * checked it already :) 825 */ 826 BUG(); 827 } 828 829 if (!req->request.no_interrupt && !chain) 830 trb->ctrl |= DWC3_TRB_CTRL_IOC; 831 832 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 833 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 834 trb->ctrl |= DWC3_TRB_CTRL_CSP; 835 } else if (last) { 836 trb->ctrl |= DWC3_TRB_CTRL_LST; 837 } 838 839 if (chain) 840 trb->ctrl |= DWC3_TRB_CTRL_CHN; 841 842 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 843 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 844 845 trb->ctrl |= DWC3_TRB_CTRL_HWO; 846 847 trace_dwc3_prepare_trb(dep, trb); 848 } 849 850 /* 851 * dwc3_prepare_trbs - setup TRBs from requests 852 * @dep: endpoint for which requests are being prepared 853 * @starting: true if the endpoint is idle and no requests are queued. 
854 * 855 * The function goes through the requests list and sets up TRBs for the 856 * transfers. The function returns once there are no more TRBs available or 857 * it runs out of requests. 858 */ 859 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 860 { 861 struct dwc3_request *req, *n; 862 u32 trbs_left; 863 u32 max; 864 unsigned int last_one = 0; 865 866 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 867 868 /* the first request must not be queued */ 869 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 870 871 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 872 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 873 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 874 if (trbs_left > max) 875 trbs_left = max; 876 } 877 878 /* 879 * If busy & slot are equal than it is either full or empty. If we are 880 * starting to process requests then we are empty. Otherwise we are 881 * full and don't do anything 882 */ 883 if (!trbs_left) { 884 if (!starting) 885 return; 886 trbs_left = DWC3_TRB_NUM; 887 /* 888 * In case we start from scratch, we queue the ISOC requests 889 * starting from slot 1. This is done because we use ring 890 * buffer and have no LST bit to stop us. Instead, we place 891 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 892 * after the first request so we start at slot 1 and have 893 * 7 requests proceed before we hit the first IOC. 894 * Other transfer types don't use the ring buffer and are 895 * processed from the first TRB until the last one. Since we 896 * don't wrap around we have to start at the beginning. 
897 */ 898 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 899 dep->busy_slot = 1; 900 dep->free_slot = 1; 901 } else { 902 dep->busy_slot = 0; 903 dep->free_slot = 0; 904 } 905 } 906 907 /* The last TRB is a link TRB, not used for xfer */ 908 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 909 return; 910 911 list_for_each_entry_safe(req, n, &dep->request_list, list) { 912 unsigned length; 913 dma_addr_t dma; 914 last_one = false; 915 916 if (req->request.num_mapped_sgs > 0) { 917 struct usb_request *request = &req->request; 918 struct scatterlist *sg = request->sg; 919 struct scatterlist *s; 920 int i; 921 922 for_each_sg(sg, s, request->num_mapped_sgs, i) { 923 unsigned chain = true; 924 925 length = sg_dma_len(s); 926 dma = sg_dma_address(s); 927 928 if (i == (request->num_mapped_sgs - 1) || 929 sg_is_last(s)) { 930 if (list_empty(&dep->request_list)) 931 last_one = true; 932 chain = false; 933 } 934 935 trbs_left--; 936 if (!trbs_left) 937 last_one = true; 938 939 if (last_one) 940 chain = false; 941 942 dwc3_prepare_one_trb(dep, req, dma, length, 943 last_one, chain, i); 944 945 if (last_one) 946 break; 947 } 948 949 if (last_one) 950 break; 951 } else { 952 dma = req->request.dma; 953 length = req->request.length; 954 trbs_left--; 955 956 if (!trbs_left) 957 last_one = 1; 958 959 /* Is this the last request? 
*/ 960 if (list_is_last(&req->list, &dep->request_list)) 961 last_one = 1; 962 963 dwc3_prepare_one_trb(dep, req, dma, length, 964 last_one, false, 0); 965 966 if (last_one) 967 break; 968 } 969 } 970 } 971 972 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 973 int start_new) 974 { 975 struct dwc3_gadget_ep_cmd_params params; 976 struct dwc3_request *req; 977 struct dwc3 *dwc = dep->dwc; 978 int ret; 979 u32 cmd; 980 981 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 982 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name); 983 return -EBUSY; 984 } 985 986 /* 987 * If we are getting here after a short-out-packet we don't enqueue any 988 * new requests as we try to set the IOC bit only on the last request. 989 */ 990 if (start_new) { 991 if (list_empty(&dep->req_queued)) 992 dwc3_prepare_trbs(dep, start_new); 993 994 /* req points to the first request which will be sent */ 995 req = next_request(&dep->req_queued); 996 } else { 997 dwc3_prepare_trbs(dep, start_new); 998 999 /* 1000 * req points to the first request where HWO changed from 0 to 1 1001 */ 1002 req = next_request(&dep->req_queued); 1003 } 1004 if (!req) { 1005 dep->flags |= DWC3_EP_PENDING_REQUEST; 1006 return 0; 1007 } 1008 1009 memset(¶ms, 0, sizeof(params)); 1010 1011 if (start_new) { 1012 params.param0 = upper_32_bits(req->trb_dma); 1013 params.param1 = lower_32_bits(req->trb_dma); 1014 cmd = DWC3_DEPCMD_STARTTRANSFER; 1015 } else { 1016 cmd = DWC3_DEPCMD_UPDATETRANSFER; 1017 } 1018 1019 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 1020 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 1021 if (ret < 0) { 1022 /* 1023 * FIXME we need to iterate over the list of requests 1024 * here and stop, unmap, free and del each of the linked 1025 * requests instead of what we do now. 
1026 */ 1027 usb_gadget_unmap_request(&dwc->gadget, &req->request, 1028 req->direction); 1029 list_del(&req->list); 1030 return ret; 1031 } 1032 1033 dep->flags |= DWC3_EP_BUSY; 1034 1035 if (start_new) { 1036 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 1037 dep->number); 1038 WARN_ON_ONCE(!dep->resource_index); 1039 } 1040 1041 return 0; 1042 } 1043 1044 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1045 struct dwc3_ep *dep, u32 cur_uf) 1046 { 1047 u32 uf; 1048 1049 if (list_empty(&dep->request_list)) { 1050 dwc3_trace(trace_dwc3_gadget, 1051 "ISOC ep %s run out for requests", 1052 dep->name); 1053 dep->flags |= DWC3_EP_PENDING_REQUEST; 1054 return; 1055 } 1056 1057 /* 4 micro frames in the future */ 1058 uf = cur_uf + dep->interval * 4; 1059 1060 __dwc3_gadget_kick_transfer(dep, uf, 1); 1061 } 1062 1063 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1064 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1065 { 1066 u32 cur_uf, mask; 1067 1068 mask = ~(dep->interval - 1); 1069 cur_uf = event->parameters & mask; 1070 1071 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1072 } 1073 1074 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1075 { 1076 struct dwc3 *dwc = dep->dwc; 1077 int ret; 1078 1079 if (!dep->endpoint.desc) { 1080 dwc3_trace(trace_dwc3_gadget, 1081 "trying to queue request %p to disabled %s\n", 1082 &req->request, dep->endpoint.name); 1083 return -ESHUTDOWN; 1084 } 1085 1086 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1087 &req->request, req->dep->name)) { 1088 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n", 1089 &req->request, req->dep->name); 1090 return -EINVAL; 1091 } 1092 1093 req->request.actual = 0; 1094 req->request.status = -EINPROGRESS; 1095 req->direction = dep->direction; 1096 req->epnum = dep->number; 1097 1098 trace_dwc3_ep_queue(req); 1099 1100 /* 1101 * We only add to our list of requests now and 1102 * start consuming the list once we 
get XferNotReady 1103 * IRQ. 1104 * 1105 * That way, we avoid doing anything that we don't need 1106 * to do now and defer it until the point we receive a 1107 * particular token from the Host side. 1108 * 1109 * This will also avoid Host cancelling URBs due to too 1110 * many NAKs. 1111 */ 1112 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1113 dep->direction); 1114 if (ret) 1115 return ret; 1116 1117 list_add_tail(&req->list, &dep->request_list); 1118 1119 /* 1120 * If there are no pending requests and the endpoint isn't already 1121 * busy, we will just start the request straight away. 1122 * 1123 * This will save one IRQ (XFER_NOT_READY) and possibly make it a 1124 * little bit faster. 1125 */ 1126 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1127 !usb_endpoint_xfer_int(dep->endpoint.desc) && 1128 !(dep->flags & DWC3_EP_BUSY)) { 1129 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1130 goto out; 1131 } 1132 1133 /* 1134 * There are a few special cases: 1135 * 1136 * 1. XferNotReady with empty list of requests. We need to kick the 1137 * transfer here in that situation, otherwise we will be NAKing 1138 * forever. If we get XferNotReady before gadget driver has a 1139 * chance to queue a request, we will ACK the IRQ but won't be 1140 * able to receive the data until the next request is queued. 1141 * The following code is handling exactly that. 1142 * 1143 */ 1144 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1145 /* 1146 * If xfernotready is already elapsed and it is a case 1147 * of isoc transfer, then issue END TRANSFER, so that 1148 * you can receive xfernotready again and can have 1149 * notion of current microframe. 
1150 */ 1151 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1152 if (list_empty(&dep->req_queued)) { 1153 dwc3_stop_active_transfer(dwc, dep->number, true); 1154 dep->flags = DWC3_EP_ENABLED; 1155 } 1156 return 0; 1157 } 1158 1159 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1160 if (!ret) 1161 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1162 1163 goto out; 1164 } 1165 1166 /* 1167 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1168 * kick the transfer here after queuing a request, otherwise the 1169 * core may not see the modified TRB(s). 1170 */ 1171 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1172 (dep->flags & DWC3_EP_BUSY) && 1173 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1174 WARN_ON_ONCE(!dep->resource_index); 1175 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1176 false); 1177 goto out; 1178 } 1179 1180 /* 1181 * 4. Stream Capable Bulk Endpoints. We need to start the transfer 1182 * right away, otherwise host will not know we have streams to be 1183 * handled. 
1184 */ 1185 if (dep->stream_capable) 1186 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1187 1188 out: 1189 if (ret && ret != -EBUSY) 1190 dwc3_trace(trace_dwc3_gadget, 1191 "%s: failed to kick transfers\n", 1192 dep->name); 1193 if (ret == -EBUSY) 1194 ret = 0; 1195 1196 return ret; 1197 } 1198 1199 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep, 1200 struct usb_request *request) 1201 { 1202 dwc3_gadget_ep_free_request(ep, request); 1203 } 1204 1205 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) 1206 { 1207 struct dwc3_request *req; 1208 struct usb_request *request; 1209 struct usb_ep *ep = &dep->endpoint; 1210 1211 dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n"); 1212 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 1213 if (!request) 1214 return -ENOMEM; 1215 1216 request->length = 0; 1217 request->buf = dwc->zlp_buf; 1218 request->complete = __dwc3_gadget_ep_zlp_complete; 1219 1220 req = to_dwc3_request(request); 1221 1222 return __dwc3_gadget_ep_queue(dep, req); 1223 } 1224 1225 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1226 gfp_t gfp_flags) 1227 { 1228 struct dwc3_request *req = to_dwc3_request(request); 1229 struct dwc3_ep *dep = to_dwc3_ep(ep); 1230 struct dwc3 *dwc = dep->dwc; 1231 1232 unsigned long flags; 1233 1234 int ret; 1235 1236 spin_lock_irqsave(&dwc->lock, flags); 1237 ret = __dwc3_gadget_ep_queue(dep, req); 1238 1239 /* 1240 * Okay, here's the thing, if gadget driver has requested for a ZLP by 1241 * setting request->zero, instead of doing magic, we will just queue an 1242 * extra usb_request ourselves so that it gets handled the same way as 1243 * any other request. 
1244 */ 1245 if (ret == 0 && request->zero && request->length && 1246 (request->length % ep->maxpacket == 0)) 1247 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep); 1248 1249 spin_unlock_irqrestore(&dwc->lock, flags); 1250 1251 return ret; 1252 } 1253 1254 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1255 struct usb_request *request) 1256 { 1257 struct dwc3_request *req = to_dwc3_request(request); 1258 struct dwc3_request *r = NULL; 1259 1260 struct dwc3_ep *dep = to_dwc3_ep(ep); 1261 struct dwc3 *dwc = dep->dwc; 1262 1263 unsigned long flags; 1264 int ret = 0; 1265 1266 trace_dwc3_ep_dequeue(req); 1267 1268 spin_lock_irqsave(&dwc->lock, flags); 1269 1270 list_for_each_entry(r, &dep->request_list, list) { 1271 if (r == req) 1272 break; 1273 } 1274 1275 if (r != req) { 1276 list_for_each_entry(r, &dep->req_queued, list) { 1277 if (r == req) 1278 break; 1279 } 1280 if (r == req) { 1281 /* wait until it is processed */ 1282 dwc3_stop_active_transfer(dwc, dep->number, true); 1283 goto out1; 1284 } 1285 dev_err(dwc->dev, "request %p was not queued to %s\n", 1286 request, ep->name); 1287 ret = -EINVAL; 1288 goto out0; 1289 } 1290 1291 out1: 1292 /* giveback the request */ 1293 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1294 1295 out0: 1296 spin_unlock_irqrestore(&dwc->lock, flags); 1297 1298 return ret; 1299 } 1300 1301 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1302 { 1303 struct dwc3_gadget_ep_cmd_params params; 1304 struct dwc3 *dwc = dep->dwc; 1305 int ret; 1306 1307 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1308 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1309 return -EINVAL; 1310 } 1311 1312 memset(¶ms, 0x00, sizeof(params)); 1313 1314 if (value) { 1315 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1316 (!list_empty(&dep->req_queued) || 1317 !list_empty(&dep->request_list)))) { 1318 dwc3_trace(trace_dwc3_gadget, 1319 "%s: pending request, cannot halt\n", 1320 dep->name); 1321 return 
-EAGAIN; 1322 } 1323 1324 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1325 DWC3_DEPCMD_SETSTALL, ¶ms); 1326 if (ret) 1327 dev_err(dwc->dev, "failed to set STALL on %s\n", 1328 dep->name); 1329 else 1330 dep->flags |= DWC3_EP_STALL; 1331 } else { 1332 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1333 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1334 if (ret) 1335 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1336 dep->name); 1337 else 1338 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1339 } 1340 1341 return ret; 1342 } 1343 1344 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1345 { 1346 struct dwc3_ep *dep = to_dwc3_ep(ep); 1347 struct dwc3 *dwc = dep->dwc; 1348 1349 unsigned long flags; 1350 1351 int ret; 1352 1353 spin_lock_irqsave(&dwc->lock, flags); 1354 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1355 spin_unlock_irqrestore(&dwc->lock, flags); 1356 1357 return ret; 1358 } 1359 1360 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1361 { 1362 struct dwc3_ep *dep = to_dwc3_ep(ep); 1363 struct dwc3 *dwc = dep->dwc; 1364 unsigned long flags; 1365 int ret; 1366 1367 spin_lock_irqsave(&dwc->lock, flags); 1368 dep->flags |= DWC3_EP_WEDGE; 1369 1370 if (dep->number == 0 || dep->number == 1) 1371 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1372 else 1373 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1374 spin_unlock_irqrestore(&dwc->lock, flags); 1375 1376 return ret; 1377 } 1378 1379 /* -------------------------------------------------------------------------- */ 1380 1381 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1382 .bLength = USB_DT_ENDPOINT_SIZE, 1383 .bDescriptorType = USB_DT_ENDPOINT, 1384 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1385 }; 1386 1387 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1388 .enable = dwc3_gadget_ep0_enable, 1389 .disable = dwc3_gadget_ep0_disable, 1390 .alloc_request = dwc3_gadget_ep_alloc_request, 1391 .free_request = dwc3_gadget_ep_free_request, 1392 .queue = 
dwc3_gadget_ep0_queue, 1393 .dequeue = dwc3_gadget_ep_dequeue, 1394 .set_halt = dwc3_gadget_ep0_set_halt, 1395 .set_wedge = dwc3_gadget_ep_set_wedge, 1396 }; 1397 1398 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1399 .enable = dwc3_gadget_ep_enable, 1400 .disable = dwc3_gadget_ep_disable, 1401 .alloc_request = dwc3_gadget_ep_alloc_request, 1402 .free_request = dwc3_gadget_ep_free_request, 1403 .queue = dwc3_gadget_ep_queue, 1404 .dequeue = dwc3_gadget_ep_dequeue, 1405 .set_halt = dwc3_gadget_ep_set_halt, 1406 .set_wedge = dwc3_gadget_ep_set_wedge, 1407 }; 1408 1409 /* -------------------------------------------------------------------------- */ 1410 1411 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1412 { 1413 struct dwc3 *dwc = gadget_to_dwc(g); 1414 u32 reg; 1415 1416 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1417 return DWC3_DSTS_SOFFN(reg); 1418 } 1419 1420 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1421 { 1422 struct dwc3 *dwc = gadget_to_dwc(g); 1423 1424 unsigned long timeout; 1425 unsigned long flags; 1426 1427 u32 reg; 1428 1429 int ret = 0; 1430 1431 u8 link_state; 1432 u8 speed; 1433 1434 spin_lock_irqsave(&dwc->lock, flags); 1435 1436 /* 1437 * According to the Databook Remote wakeup request should 1438 * be issued only when the device is in early suspend state. 1439 * 1440 * We can check that via USB Link State bits in DSTS register. 
1441 */ 1442 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1443 1444 speed = reg & DWC3_DSTS_CONNECTSPD; 1445 if ((speed == DWC3_DSTS_SUPERSPEED) || 1446 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { 1447 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); 1448 ret = -EINVAL; 1449 goto out; 1450 } 1451 1452 link_state = DWC3_DSTS_USBLNKST(reg); 1453 1454 switch (link_state) { 1455 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1456 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1457 break; 1458 default: 1459 dwc3_trace(trace_dwc3_gadget, 1460 "can't wakeup from '%s'\n", 1461 dwc3_gadget_link_string(link_state)); 1462 ret = -EINVAL; 1463 goto out; 1464 } 1465 1466 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1467 if (ret < 0) { 1468 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1469 goto out; 1470 } 1471 1472 /* Recent versions do this automatically */ 1473 if (dwc->revision < DWC3_REVISION_194A) { 1474 /* write zeroes to Link Change Request */ 1475 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1476 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1477 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1478 } 1479 1480 /* poll until Link State changes to ON */ 1481 timeout = jiffies + msecs_to_jiffies(100); 1482 1483 while (!time_after(jiffies, timeout)) { 1484 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1485 1486 /* in HS, means ON */ 1487 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1488 break; 1489 } 1490 1491 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1492 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1493 ret = -EINVAL; 1494 } 1495 1496 out: 1497 spin_unlock_irqrestore(&dwc->lock, flags); 1498 1499 return ret; 1500 } 1501 1502 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1503 int is_selfpowered) 1504 { 1505 struct dwc3 *dwc = gadget_to_dwc(g); 1506 unsigned long flags; 1507 1508 spin_lock_irqsave(&dwc->lock, flags); 1509 g->is_selfpowered = !!is_selfpowered; 1510 spin_unlock_irqrestore(&dwc->lock, flags); 1511 1512 
return 0; 1513 } 1514 1515 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1516 { 1517 u32 reg; 1518 u32 timeout = 500; 1519 1520 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1521 if (is_on) { 1522 if (dwc->revision <= DWC3_REVISION_187A) { 1523 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1524 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1525 } 1526 1527 if (dwc->revision >= DWC3_REVISION_194A) 1528 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1529 reg |= DWC3_DCTL_RUN_STOP; 1530 1531 if (dwc->has_hibernation) 1532 reg |= DWC3_DCTL_KEEP_CONNECT; 1533 1534 dwc->pullups_connected = true; 1535 } else { 1536 reg &= ~DWC3_DCTL_RUN_STOP; 1537 1538 if (dwc->has_hibernation && !suspend) 1539 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1540 1541 dwc->pullups_connected = false; 1542 } 1543 1544 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1545 1546 do { 1547 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1548 if (is_on) { 1549 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1550 break; 1551 } else { 1552 if (reg & DWC3_DSTS_DEVCTRLHLT) 1553 break; 1554 } 1555 timeout--; 1556 if (!timeout) 1557 return -ETIMEDOUT; 1558 udelay(1); 1559 } while (1); 1560 1561 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1562 dwc->gadget_driver 1563 ? dwc->gadget_driver->function : "no-function", 1564 is_on ? 
"connect" : "disconnect"); 1565 1566 return 0; 1567 } 1568 1569 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1570 { 1571 struct dwc3 *dwc = gadget_to_dwc(g); 1572 unsigned long flags; 1573 int ret; 1574 1575 is_on = !!is_on; 1576 1577 spin_lock_irqsave(&dwc->lock, flags); 1578 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1579 spin_unlock_irqrestore(&dwc->lock, flags); 1580 1581 return ret; 1582 } 1583 1584 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1585 { 1586 u32 reg; 1587 1588 /* Enable all but Start and End of Frame IRQs */ 1589 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1590 DWC3_DEVTEN_EVNTOVERFLOWEN | 1591 DWC3_DEVTEN_CMDCMPLTEN | 1592 DWC3_DEVTEN_ERRTICERREN | 1593 DWC3_DEVTEN_WKUPEVTEN | 1594 DWC3_DEVTEN_ULSTCNGEN | 1595 DWC3_DEVTEN_CONNECTDONEEN | 1596 DWC3_DEVTEN_USBRSTEN | 1597 DWC3_DEVTEN_DISCONNEVTEN); 1598 1599 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1600 } 1601 1602 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1603 { 1604 /* mask all interrupts */ 1605 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1606 } 1607 1608 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1609 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1610 1611 static int dwc3_gadget_start(struct usb_gadget *g, 1612 struct usb_gadget_driver *driver) 1613 { 1614 struct dwc3 *dwc = gadget_to_dwc(g); 1615 struct dwc3_ep *dep; 1616 unsigned long flags; 1617 int ret = 0; 1618 int irq; 1619 u32 reg; 1620 1621 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1622 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1623 IRQF_SHARED, "dwc3", dwc); 1624 if (ret) { 1625 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1626 irq, ret); 1627 goto err0; 1628 } 1629 1630 spin_lock_irqsave(&dwc->lock, flags); 1631 1632 if (dwc->gadget_driver) { 1633 dev_err(dwc->dev, "%s is already bound to %s\n", 1634 dwc->gadget.name, 1635 dwc->gadget_driver->driver.name); 1636 ret = -EBUSY; 1637 goto err1; 1638 } 1639 1640 
dwc->gadget_driver = driver; 1641 1642 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1643 reg &= ~(DWC3_DCFG_SPEED_MASK); 1644 1645 /** 1646 * WORKAROUND: DWC3 revision < 2.20a have an issue 1647 * which would cause metastability state on Run/Stop 1648 * bit if we try to force the IP to USB2-only mode. 1649 * 1650 * Because of that, we cannot configure the IP to any 1651 * speed other than the SuperSpeed 1652 * 1653 * Refers to: 1654 * 1655 * STAR#9000525659: Clock Domain Crossing on DCTL in 1656 * USB 2.0 Mode 1657 */ 1658 if (dwc->revision < DWC3_REVISION_220A) { 1659 reg |= DWC3_DCFG_SUPERSPEED; 1660 } else { 1661 switch (dwc->maximum_speed) { 1662 case USB_SPEED_LOW: 1663 reg |= DWC3_DSTS_LOWSPEED; 1664 break; 1665 case USB_SPEED_FULL: 1666 reg |= DWC3_DSTS_FULLSPEED1; 1667 break; 1668 case USB_SPEED_HIGH: 1669 reg |= DWC3_DSTS_HIGHSPEED; 1670 break; 1671 case USB_SPEED_SUPER_PLUS: 1672 reg |= DWC3_DSTS_SUPERSPEED_PLUS; 1673 break; 1674 default: 1675 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", 1676 dwc->maximum_speed); 1677 /* fall through */ 1678 case USB_SPEED_SUPER: 1679 reg |= DWC3_DCFG_SUPERSPEED; 1680 break; 1681 } 1682 } 1683 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1684 1685 /* Start with SuperSpeed Default */ 1686 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1687 1688 dep = dwc->eps[0]; 1689 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1690 false); 1691 if (ret) { 1692 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1693 goto err2; 1694 } 1695 1696 dep = dwc->eps[1]; 1697 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1698 false); 1699 if (ret) { 1700 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1701 goto err3; 1702 } 1703 1704 /* begin to receive SETUP packets */ 1705 dwc->ep0state = EP0_SETUP_PHASE; 1706 dwc3_ep0_out_start(dwc); 1707 1708 dwc3_gadget_enable_irq(dwc); 1709 1710 spin_unlock_irqrestore(&dwc->lock, flags); 1711 1712 return 0; 1713 1714 err3: 1715 
__dwc3_gadget_ep_disable(dwc->eps[0]); 1716 1717 err2: 1718 dwc->gadget_driver = NULL; 1719 1720 err1: 1721 spin_unlock_irqrestore(&dwc->lock, flags); 1722 1723 free_irq(irq, dwc); 1724 1725 err0: 1726 return ret; 1727 } 1728 1729 static int dwc3_gadget_stop(struct usb_gadget *g) 1730 { 1731 struct dwc3 *dwc = gadget_to_dwc(g); 1732 unsigned long flags; 1733 int irq; 1734 1735 spin_lock_irqsave(&dwc->lock, flags); 1736 1737 dwc3_gadget_disable_irq(dwc); 1738 __dwc3_gadget_ep_disable(dwc->eps[0]); 1739 __dwc3_gadget_ep_disable(dwc->eps[1]); 1740 1741 dwc->gadget_driver = NULL; 1742 1743 spin_unlock_irqrestore(&dwc->lock, flags); 1744 1745 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1746 free_irq(irq, dwc); 1747 1748 return 0; 1749 } 1750 1751 static const struct usb_gadget_ops dwc3_gadget_ops = { 1752 .get_frame = dwc3_gadget_get_frame, 1753 .wakeup = dwc3_gadget_wakeup, 1754 .set_selfpowered = dwc3_gadget_set_selfpowered, 1755 .pullup = dwc3_gadget_pullup, 1756 .udc_start = dwc3_gadget_start, 1757 .udc_stop = dwc3_gadget_stop, 1758 }; 1759 1760 /* -------------------------------------------------------------------------- */ 1761 1762 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1763 u8 num, u32 direction) 1764 { 1765 struct dwc3_ep *dep; 1766 u8 i; 1767 1768 for (i = 0; i < num; i++) { 1769 u8 epnum = (i << 1) | (!!direction); 1770 1771 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1772 if (!dep) 1773 return -ENOMEM; 1774 1775 dep->dwc = dwc; 1776 dep->number = epnum; 1777 dep->direction = !!direction; 1778 dwc->eps[epnum] = dep; 1779 1780 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1781 (epnum & 1) ? 
"in" : "out"); 1782 1783 dep->endpoint.name = dep->name; 1784 1785 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1786 1787 if (epnum == 0 || epnum == 1) { 1788 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1789 dep->endpoint.maxburst = 1; 1790 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1791 if (!epnum) 1792 dwc->gadget.ep0 = &dep->endpoint; 1793 } else { 1794 int ret; 1795 1796 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1797 dep->endpoint.max_streams = 15; 1798 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1799 list_add_tail(&dep->endpoint.ep_list, 1800 &dwc->gadget.ep_list); 1801 1802 ret = dwc3_alloc_trb_pool(dep); 1803 if (ret) 1804 return ret; 1805 } 1806 1807 if (epnum == 0 || epnum == 1) { 1808 dep->endpoint.caps.type_control = true; 1809 } else { 1810 dep->endpoint.caps.type_iso = true; 1811 dep->endpoint.caps.type_bulk = true; 1812 dep->endpoint.caps.type_int = true; 1813 } 1814 1815 dep->endpoint.caps.dir_in = !!direction; 1816 dep->endpoint.caps.dir_out = !direction; 1817 1818 INIT_LIST_HEAD(&dep->request_list); 1819 INIT_LIST_HEAD(&dep->req_queued); 1820 } 1821 1822 return 0; 1823 } 1824 1825 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1826 { 1827 int ret; 1828 1829 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1830 1831 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1832 if (ret < 0) { 1833 dwc3_trace(trace_dwc3_gadget, 1834 "failed to allocate OUT endpoints"); 1835 return ret; 1836 } 1837 1838 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1839 if (ret < 0) { 1840 dwc3_trace(trace_dwc3_gadget, 1841 "failed to allocate IN endpoints"); 1842 return ret; 1843 } 1844 1845 return 0; 1846 } 1847 1848 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1849 { 1850 struct dwc3_ep *dep; 1851 u8 epnum; 1852 1853 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1854 dep = dwc->eps[epnum]; 1855 if (!dep) 1856 continue; 1857 /* 1858 * Physical endpoints 0 and 1 are special; they form the 1859 * 
bi-directional USB endpoint 0. 1860 * 1861 * For those two physical endpoints, we don't allocate a TRB 1862 * pool nor do we add them the endpoints list. Due to that, we 1863 * shouldn't do these two operations otherwise we would end up 1864 * with all sorts of bugs when removing dwc3.ko. 1865 */ 1866 if (epnum != 0 && epnum != 1) { 1867 dwc3_free_trb_pool(dep); 1868 list_del(&dep->endpoint.ep_list); 1869 } 1870 1871 kfree(dep); 1872 } 1873 } 1874 1875 /* -------------------------------------------------------------------------- */ 1876 1877 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1878 struct dwc3_request *req, struct dwc3_trb *trb, 1879 const struct dwc3_event_depevt *event, int status) 1880 { 1881 unsigned int count; 1882 unsigned int s_pkt = 0; 1883 unsigned int trb_status; 1884 1885 trace_dwc3_complete_trb(dep, trb); 1886 1887 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1888 /* 1889 * We continue despite the error. There is not much we 1890 * can do. If we don't clean it up we loop forever. If 1891 * we skip the TRB then it gets overwritten after a 1892 * while since we use them in a ring buffer. A BUG() 1893 * would help. Lets hope that if this occurs, someone 1894 * fixes the root cause instead of looking away :) 1895 */ 1896 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1897 dep->name, trb); 1898 count = trb->size & DWC3_TRB_SIZE_MASK; 1899 1900 if (dep->direction) { 1901 if (count) { 1902 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1903 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1904 dwc3_trace(trace_dwc3_gadget, 1905 "%s: incomplete IN transfer\n", 1906 dep->name); 1907 /* 1908 * If missed isoc occurred and there is 1909 * no request queued then issue END 1910 * TRANSFER, so that core generates 1911 * next xfernotready and we will issue 1912 * a fresh START TRANSFER. 
1913 * If there are still queued request 1914 * then wait, do not issue either END 1915 * or UPDATE TRANSFER, just attach next 1916 * request in request_list during 1917 * giveback.If any future queued request 1918 * is successfully transferred then we 1919 * will issue UPDATE TRANSFER for all 1920 * request in the request_list. 1921 */ 1922 dep->flags |= DWC3_EP_MISSED_ISOC; 1923 } else { 1924 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1925 dep->name); 1926 status = -ECONNRESET; 1927 } 1928 } else { 1929 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1930 } 1931 } else { 1932 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1933 s_pkt = 1; 1934 } 1935 1936 /* 1937 * We assume here we will always receive the entire data block 1938 * which we should receive. Meaning, if we program RX to 1939 * receive 4K but we receive only 2K, we assume that's all we 1940 * should receive and we simply bounce the request back to the 1941 * gadget driver for further processing. 1942 */ 1943 req->request.actual += req->request.length - count; 1944 if (s_pkt) 1945 return 1; 1946 if ((event->status & DEPEVT_STATUS_LST) && 1947 (trb->ctrl & (DWC3_TRB_CTRL_LST | 1948 DWC3_TRB_CTRL_HWO))) 1949 return 1; 1950 if ((event->status & DEPEVT_STATUS_IOC) && 1951 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1952 return 1; 1953 return 0; 1954 } 1955 1956 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1957 const struct dwc3_event_depevt *event, int status) 1958 { 1959 struct dwc3_request *req; 1960 struct dwc3_trb *trb; 1961 unsigned int slot; 1962 unsigned int i; 1963 int ret; 1964 1965 do { 1966 req = next_request(&dep->req_queued); 1967 if (WARN_ON_ONCE(!req)) 1968 return 1; 1969 1970 i = 0; 1971 do { 1972 slot = req->start_slot + i; 1973 if ((slot == DWC3_TRB_NUM - 1) && 1974 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1975 slot++; 1976 slot %= DWC3_TRB_NUM; 1977 trb = &dep->trb_pool[slot]; 1978 1979 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1980 event, status); 1981 if 
(ret) 1982 break; 1983 } while (++i < req->request.num_mapped_sgs); 1984 1985 dwc3_gadget_giveback(dep, req, status); 1986 1987 if (ret) 1988 break; 1989 } while (1); 1990 1991 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1992 list_empty(&dep->req_queued)) { 1993 if (list_empty(&dep->request_list)) { 1994 /* 1995 * If there is no entry in request list then do 1996 * not issue END TRANSFER now. Just set PENDING 1997 * flag, so that END TRANSFER is issued when an 1998 * entry is added into request list. 1999 */ 2000 dep->flags = DWC3_EP_PENDING_REQUEST; 2001 } else { 2002 dwc3_stop_active_transfer(dwc, dep->number, true); 2003 dep->flags = DWC3_EP_ENABLED; 2004 } 2005 return 1; 2006 } 2007 2008 return 1; 2009 } 2010 2011 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 2012 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 2013 { 2014 unsigned status = 0; 2015 int clean_busy; 2016 u32 is_xfer_complete; 2017 2018 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 2019 2020 if (event->status & DEPEVT_STATUS_BUSERR) 2021 status = -ECONNRESET; 2022 2023 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 2024 if (clean_busy && (is_xfer_complete || 2025 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 2026 dep->flags &= ~DWC3_EP_BUSY; 2027 2028 /* 2029 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 2030 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 
2031 */ 2032 if (dwc->revision < DWC3_REVISION_183A) { 2033 u32 reg; 2034 int i; 2035 2036 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 2037 dep = dwc->eps[i]; 2038 2039 if (!(dep->flags & DWC3_EP_ENABLED)) 2040 continue; 2041 2042 if (!list_empty(&dep->req_queued)) 2043 return; 2044 } 2045 2046 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2047 reg |= dwc->u1u2; 2048 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2049 2050 dwc->u1u2 = 0; 2051 } 2052 2053 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2054 int ret; 2055 2056 ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete); 2057 if (!ret || ret == -EBUSY) 2058 return; 2059 } 2060 } 2061 2062 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2063 const struct dwc3_event_depevt *event) 2064 { 2065 struct dwc3_ep *dep; 2066 u8 epnum = event->endpoint_number; 2067 2068 dep = dwc->eps[epnum]; 2069 2070 if (!(dep->flags & DWC3_EP_ENABLED)) 2071 return; 2072 2073 if (epnum == 0 || epnum == 1) { 2074 dwc3_ep0_interrupt(dwc, event); 2075 return; 2076 } 2077 2078 switch (event->endpoint_event) { 2079 case DWC3_DEPEVT_XFERCOMPLETE: 2080 dep->resource_index = 0; 2081 2082 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2083 dwc3_trace(trace_dwc3_gadget, 2084 "%s is an Isochronous endpoint\n", 2085 dep->name); 2086 return; 2087 } 2088 2089 dwc3_endpoint_transfer_complete(dwc, dep, event); 2090 break; 2091 case DWC3_DEPEVT_XFERINPROGRESS: 2092 dwc3_endpoint_transfer_complete(dwc, dep, event); 2093 break; 2094 case DWC3_DEPEVT_XFERNOTREADY: 2095 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2096 dwc3_gadget_start_isoc(dwc, dep, event); 2097 } else { 2098 int active; 2099 int ret; 2100 2101 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; 2102 2103 dwc3_trace(trace_dwc3_gadget, "%s: reason %s", 2104 dep->name, active ? 
"Transfer Active" 2105 : "Transfer Not Active"); 2106 2107 ret = __dwc3_gadget_kick_transfer(dep, 0, !active); 2108 if (!ret || ret == -EBUSY) 2109 return; 2110 2111 dwc3_trace(trace_dwc3_gadget, 2112 "%s: failed to kick transfers\n", 2113 dep->name); 2114 } 2115 2116 break; 2117 case DWC3_DEPEVT_STREAMEVT: 2118 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) { 2119 dev_err(dwc->dev, "Stream event for non-Bulk %s\n", 2120 dep->name); 2121 return; 2122 } 2123 2124 switch (event->status) { 2125 case DEPEVT_STREAMEVT_FOUND: 2126 dwc3_trace(trace_dwc3_gadget, 2127 "Stream %d found and started", 2128 event->parameters); 2129 2130 break; 2131 case DEPEVT_STREAMEVT_NOTFOUND: 2132 /* FALLTHROUGH */ 2133 default: 2134 dwc3_trace(trace_dwc3_gadget, 2135 "unable to find suitable stream\n"); 2136 } 2137 break; 2138 case DWC3_DEPEVT_RXTXFIFOEVT: 2139 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name); 2140 break; 2141 case DWC3_DEPEVT_EPCMDCMPLT: 2142 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); 2143 break; 2144 } 2145 } 2146 2147 static void dwc3_disconnect_gadget(struct dwc3 *dwc) 2148 { 2149 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { 2150 spin_unlock(&dwc->lock); 2151 dwc->gadget_driver->disconnect(&dwc->gadget); 2152 spin_lock(&dwc->lock); 2153 } 2154 } 2155 2156 static void dwc3_suspend_gadget(struct dwc3 *dwc) 2157 { 2158 if (dwc->gadget_driver && dwc->gadget_driver->suspend) { 2159 spin_unlock(&dwc->lock); 2160 dwc->gadget_driver->suspend(&dwc->gadget); 2161 spin_lock(&dwc->lock); 2162 } 2163 } 2164 2165 static void dwc3_resume_gadget(struct dwc3 *dwc) 2166 { 2167 if (dwc->gadget_driver && dwc->gadget_driver->resume) { 2168 spin_unlock(&dwc->lock); 2169 dwc->gadget_driver->resume(&dwc->gadget); 2170 spin_lock(&dwc->lock); 2171 } 2172 } 2173 2174 static void dwc3_reset_gadget(struct dwc3 *dwc) 2175 { 2176 if (!dwc->gadget_driver) 2177 return; 2178 2179 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { 2180 spin_unlock(&dwc->lock); 
2181 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2182 spin_lock(&dwc->lock); 2183 } 2184 } 2185 2186 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2187 { 2188 struct dwc3_ep *dep; 2189 struct dwc3_gadget_ep_cmd_params params; 2190 u32 cmd; 2191 int ret; 2192 2193 dep = dwc->eps[epnum]; 2194 2195 if (!dep->resource_index) 2196 return; 2197 2198 /* 2199 * NOTICE: We are violating what the Databook says about the 2200 * EndTransfer command. Ideally we would _always_ wait for the 2201 * EndTransfer Command Completion IRQ, but that's causing too 2202 * much trouble synchronizing between us and gadget driver. 2203 * 2204 * We have discussed this with the IP Provider and it was 2205 * suggested to giveback all requests here, but give HW some 2206 * extra time to synchronize with the interconnect. We're using 2207 * an arbitrary 100us delay for that. 2208 * 2209 * Note also that a similar handling was tested by Synopsys 2210 * (thanks a lot Paul) and nothing bad has come out of it. 2211 * In short, what we're doing is: 2212 * 2213 * - Issue EndTransfer WITH CMDIOC bit set 2214 * - Wait 100us 2215 */ 2216 2217 cmd = DWC3_DEPCMD_ENDTRANSFER; 2218 cmd |= force ? 
DWC3_DEPCMD_HIPRI_FORCERM : 0; 2219 cmd |= DWC3_DEPCMD_CMDIOC; 2220 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2221 memset(¶ms, 0, sizeof(params)); 2222 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 2223 WARN_ON_ONCE(ret); 2224 dep->resource_index = 0; 2225 dep->flags &= ~DWC3_EP_BUSY; 2226 udelay(100); 2227 } 2228 2229 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 2230 { 2231 u32 epnum; 2232 2233 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2234 struct dwc3_ep *dep; 2235 2236 dep = dwc->eps[epnum]; 2237 if (!dep) 2238 continue; 2239 2240 if (!(dep->flags & DWC3_EP_ENABLED)) 2241 continue; 2242 2243 dwc3_remove_requests(dwc, dep); 2244 } 2245 } 2246 2247 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2248 { 2249 u32 epnum; 2250 2251 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2252 struct dwc3_ep *dep; 2253 struct dwc3_gadget_ep_cmd_params params; 2254 int ret; 2255 2256 dep = dwc->eps[epnum]; 2257 if (!dep) 2258 continue; 2259 2260 if (!(dep->flags & DWC3_EP_STALL)) 2261 continue; 2262 2263 dep->flags &= ~DWC3_EP_STALL; 2264 2265 memset(¶ms, 0, sizeof(params)); 2266 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2267 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2268 WARN_ON_ONCE(ret); 2269 } 2270 } 2271 2272 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2273 { 2274 int reg; 2275 2276 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2277 reg &= ~DWC3_DCTL_INITU1ENA; 2278 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2279 2280 reg &= ~DWC3_DCTL_INITU2ENA; 2281 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2282 2283 dwc3_disconnect_gadget(dwc); 2284 2285 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2286 dwc->setup_packet_pending = false; 2287 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2288 } 2289 2290 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2291 { 2292 u32 reg; 2293 2294 /* 2295 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2296 * would cause a missing Disconnect Event if there's a 2297 * 
pending Setup Packet in the FIFO. 2298 * 2299 * There's no suggested workaround on the official Bug 2300 * report, which states that "unless the driver/application 2301 * is doing any special handling of a disconnect event, 2302 * there is no functional issue". 2303 * 2304 * Unfortunately, it turns out that we _do_ some special 2305 * handling of a disconnect event, namely complete all 2306 * pending transfers, notify gadget driver of the 2307 * disconnection, and so on. 2308 * 2309 * Our suggested workaround is to follow the Disconnect 2310 * Event steps here, instead, based on a setup_packet_pending 2311 * flag. Such flag gets set whenever we have a SETUP_PENDING 2312 * status for EP0 TRBs and gets cleared on XferComplete for the 2313 * same endpoint. 2314 * 2315 * Refers to: 2316 * 2317 * STAR#9000466709: RTL: Device : Disconnect event not 2318 * generated if setup packet pending in FIFO 2319 */ 2320 if (dwc->revision < DWC3_REVISION_188A) { 2321 if (dwc->setup_packet_pending) 2322 dwc3_gadget_disconnect_interrupt(dwc); 2323 } 2324 2325 dwc3_reset_gadget(dwc); 2326 2327 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2328 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2329 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2330 dwc->test_mode = false; 2331 2332 dwc3_stop_active_transfers(dwc); 2333 dwc3_clear_stall_all_ep(dwc); 2334 2335 /* Reset device address to zero */ 2336 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2337 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2338 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2339 } 2340 2341 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 2342 { 2343 u32 reg; 2344 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 2345 2346 /* 2347 * We change the clock only at SS but I dunno why I would want to do 2348 * this. Maybe it becomes part of the power saving plan. 
*/

	if ((speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

/*
 * dwc3_gadget_conndone_interrupt - handle a Connect Done device event
 * @dwc: pointer to our context structure
 *
 * Reads the negotiated speed from DSTS, programs ep0's max packet size
 * accordingly, configures USB2 LPM capability where applicable, and
 * re-enables both physical control endpoints.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	/* raw connection-speed field out of DSTS */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DCFG_SUPERSPEED) &&
	    (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		/* no LPM for SuperSpeed(+) or pre-1.94a cores */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* re-enable both directions of physical endpoint 0 */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

/*
 * dwc3_gadget_wakeup_interrupt - handle a Wakeup device event
 * @dwc: pointer to our context structure
 *
 * Forwards the resume notification to the gadget driver, if it
 * registered a resume callback.
 */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		/* drop our lock across the gadget-driver callback */
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link Status Change event
 * @dwc: pointer to our context structure
 * @evtinfo: event information field carrying the new link state
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
*
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
	    (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
		    (next == DWC3_LINK_STATE_RESUME)) {
			dwc3_trace(trace_dwc3_gadget,
					"ignoring transition U3 -> Resume");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/*
				 * Remember the bits we cleared so the second
				 * half of the workaround can restore them.
				 */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* notify the gadget driver of suspend/resume transitions */
	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}

/*
 * dwc3_gadget_hibernation_interrupt - handle a Hibernation Request event
 * @dwc: pointer to our context structure
 * @evtinfo: event information field; bit 4 flags a SuperSpeed event
 */
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * have a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
2610 * 2611 * Refers to: 2612 * 2613 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 2614 * Device Fallback from SuperSpeed 2615 */ 2616 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) 2617 return; 2618 2619 /* enter hibernation here */ 2620 } 2621 2622 static void dwc3_gadget_interrupt(struct dwc3 *dwc, 2623 const struct dwc3_event_devt *event) 2624 { 2625 switch (event->type) { 2626 case DWC3_DEVICE_EVENT_DISCONNECT: 2627 dwc3_gadget_disconnect_interrupt(dwc); 2628 break; 2629 case DWC3_DEVICE_EVENT_RESET: 2630 dwc3_gadget_reset_interrupt(dwc); 2631 break; 2632 case DWC3_DEVICE_EVENT_CONNECT_DONE: 2633 dwc3_gadget_conndone_interrupt(dwc); 2634 break; 2635 case DWC3_DEVICE_EVENT_WAKEUP: 2636 dwc3_gadget_wakeup_interrupt(dwc); 2637 break; 2638 case DWC3_DEVICE_EVENT_HIBER_REQ: 2639 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, 2640 "unexpected hibernation event\n")) 2641 break; 2642 2643 dwc3_gadget_hibernation_interrupt(dwc, event->event_info); 2644 break; 2645 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: 2646 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); 2647 break; 2648 case DWC3_DEVICE_EVENT_EOPF: 2649 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame"); 2650 break; 2651 case DWC3_DEVICE_EVENT_SOF: 2652 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame"); 2653 break; 2654 case DWC3_DEVICE_EVENT_ERRATIC_ERROR: 2655 dwc3_trace(trace_dwc3_gadget, "Erratic Error"); 2656 break; 2657 case DWC3_DEVICE_EVENT_CMD_CMPL: 2658 dwc3_trace(trace_dwc3_gadget, "Command Complete"); 2659 break; 2660 case DWC3_DEVICE_EVENT_OVERFLOW: 2661 dwc3_trace(trace_dwc3_gadget, "Overflow"); 2662 break; 2663 default: 2664 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); 2665 } 2666 } 2667 2668 static void dwc3_process_event_entry(struct dwc3 *dwc, 2669 const union dwc3_event *event) 2670 { 2671 trace_dwc3_event(event->raw); 2672 2673 /* Endpoint IRQ, handle it and return early */ 2674 if (event->type.is_devspec == 0) { 2675 /* depevt */ 2676 return 
dwc3_endpoint_interrupt(dwc, &event->depevt); 2677 } 2678 2679 switch (event->type.type) { 2680 case DWC3_EVENT_TYPE_DEV: 2681 dwc3_gadget_interrupt(dwc, &event->devt); 2682 break; 2683 /* REVISIT what to do with Carkit and I2C events ? */ 2684 default: 2685 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); 2686 } 2687 } 2688 2689 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) 2690 { 2691 struct dwc3_event_buffer *evt; 2692 irqreturn_t ret = IRQ_NONE; 2693 int left; 2694 u32 reg; 2695 2696 evt = dwc->ev_buffs[buf]; 2697 left = evt->count; 2698 2699 if (!(evt->flags & DWC3_EVENT_PENDING)) 2700 return IRQ_NONE; 2701 2702 while (left > 0) { 2703 union dwc3_event event; 2704 2705 event.raw = *(u32 *) (evt->buf + evt->lpos); 2706 2707 dwc3_process_event_entry(dwc, &event); 2708 2709 /* 2710 * FIXME we wrap around correctly to the next entry as 2711 * almost all entries are 4 bytes in size. There is one 2712 * entry which has 12 bytes which is a regular entry 2713 * followed by 8 bytes data. ATM I don't know how 2714 * things are organized if we get next to the a 2715 * boundary so I worry about that once we try to handle 2716 * that. 
2717 */ 2718 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; 2719 left -= 4; 2720 2721 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); 2722 } 2723 2724 evt->count = 0; 2725 evt->flags &= ~DWC3_EVENT_PENDING; 2726 ret = IRQ_HANDLED; 2727 2728 /* Unmask interrupt */ 2729 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2730 reg &= ~DWC3_GEVNTSIZ_INTMASK; 2731 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2732 2733 return ret; 2734 } 2735 2736 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) 2737 { 2738 struct dwc3 *dwc = _dwc; 2739 unsigned long flags; 2740 irqreturn_t ret = IRQ_NONE; 2741 int i; 2742 2743 spin_lock_irqsave(&dwc->lock, flags); 2744 2745 for (i = 0; i < dwc->num_event_buffers; i++) 2746 ret |= dwc3_process_event_buf(dwc, i); 2747 2748 spin_unlock_irqrestore(&dwc->lock, flags); 2749 2750 return ret; 2751 } 2752 2753 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf) 2754 { 2755 struct dwc3_event_buffer *evt; 2756 u32 count; 2757 u32 reg; 2758 2759 evt = dwc->ev_buffs[buf]; 2760 2761 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); 2762 count &= DWC3_GEVNTCOUNT_MASK; 2763 if (!count) 2764 return IRQ_NONE; 2765 2766 evt->count = count; 2767 evt->flags |= DWC3_EVENT_PENDING; 2768 2769 /* Mask interrupt */ 2770 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf)); 2771 reg |= DWC3_GEVNTSIZ_INTMASK; 2772 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg); 2773 2774 return IRQ_WAKE_THREAD; 2775 } 2776 2777 static irqreturn_t dwc3_interrupt(int irq, void *_dwc) 2778 { 2779 struct dwc3 *dwc = _dwc; 2780 int i; 2781 irqreturn_t ret = IRQ_NONE; 2782 2783 for (i = 0; i < dwc->num_event_buffers; i++) { 2784 irqreturn_t status; 2785 2786 status = dwc3_check_event_buf(dwc, i); 2787 if (status == IRQ_WAKE_THREAD) 2788 ret = status; 2789 } 2790 2791 return ret; 2792 } 2793 2794 /** 2795 * dwc3_gadget_init - Initializes gadget related registers 2796 * @dwc: pointer to our controller context structure 2797 * 2798 * Returns 0 on 
success otherwise negative errno. 2799 */ 2800 int dwc3_gadget_init(struct dwc3 *dwc) 2801 { 2802 int ret; 2803 2804 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2805 &dwc->ctrl_req_addr, GFP_KERNEL); 2806 if (!dwc->ctrl_req) { 2807 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2808 ret = -ENOMEM; 2809 goto err0; 2810 } 2811 2812 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, 2813 &dwc->ep0_trb_addr, GFP_KERNEL); 2814 if (!dwc->ep0_trb) { 2815 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2816 ret = -ENOMEM; 2817 goto err1; 2818 } 2819 2820 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2821 if (!dwc->setup_buf) { 2822 ret = -ENOMEM; 2823 goto err2; 2824 } 2825 2826 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2827 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2828 GFP_KERNEL); 2829 if (!dwc->ep0_bounce) { 2830 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2831 ret = -ENOMEM; 2832 goto err3; 2833 } 2834 2835 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL); 2836 if (!dwc->zlp_buf) { 2837 ret = -ENOMEM; 2838 goto err4; 2839 } 2840 2841 dwc->gadget.ops = &dwc3_gadget_ops; 2842 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2843 dwc->gadget.sg_supported = true; 2844 dwc->gadget.name = "dwc3-gadget"; 2845 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; 2846 2847 /* 2848 * FIXME We might be setting max_speed to <SUPER, however versions 2849 * <2.20a of dwc3 have an issue with metastability (documented 2850 * elsewhere in this driver) which tells us we can't set max speed to 2851 * anything lower than SUPER. 2852 * 2853 * Because gadget.max_speed is only used by composite.c and function 2854 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 2855 * to happen so we avoid sending SuperSpeed Capability descriptor 2856 * together with our BOS descriptor as that could confuse host into 2857 * thinking we can handle super speed. 
2858 * 2859 * Note that, in fact, we won't even support GetBOS requests when speed 2860 * is less than super speed because we don't have means, yet, to tell 2861 * composite.c that we are USB 2.0 + LPM ECN. 2862 */ 2863 if (dwc->revision < DWC3_REVISION_220A) 2864 dwc3_trace(trace_dwc3_gadget, 2865 "Changing max_speed on rev %08x\n", 2866 dwc->revision); 2867 2868 dwc->gadget.max_speed = dwc->maximum_speed; 2869 2870 /* 2871 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2872 * on ep out. 2873 */ 2874 dwc->gadget.quirk_ep_out_aligned_size = true; 2875 2876 /* 2877 * REVISIT: Here we should clear all pending IRQs to be 2878 * sure we're starting from a well known location. 2879 */ 2880 2881 ret = dwc3_gadget_init_endpoints(dwc); 2882 if (ret) 2883 goto err5; 2884 2885 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2886 if (ret) { 2887 dev_err(dwc->dev, "failed to register udc\n"); 2888 goto err5; 2889 } 2890 2891 return 0; 2892 2893 err5: 2894 kfree(dwc->zlp_buf); 2895 2896 err4: 2897 dwc3_gadget_free_endpoints(dwc); 2898 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2899 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2900 2901 err3: 2902 kfree(dwc->setup_buf); 2903 2904 err2: 2905 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2906 dwc->ep0_trb, dwc->ep0_trb_addr); 2907 2908 err1: 2909 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2910 dwc->ctrl_req, dwc->ctrl_req_addr); 2911 2912 err0: 2913 return ret; 2914 } 2915 2916 /* -------------------------------------------------------------------------- */ 2917 2918 void dwc3_gadget_exit(struct dwc3 *dwc) 2919 { 2920 usb_del_gadget_udc(&dwc->gadget); 2921 2922 dwc3_gadget_free_endpoints(dwc); 2923 2924 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2925 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2926 2927 kfree(dwc->setup_buf); 2928 kfree(dwc->zlp_buf); 2929 2930 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2931 dwc->ep0_trb, dwc->ep0_trb_addr); 2932 2933 dma_free_coherent(dwc->dev, 
sizeof(*dwc->ctrl_req), 2934 dwc->ctrl_req, dwc->ctrl_req_addr); 2935 } 2936 2937 int dwc3_gadget_suspend(struct dwc3 *dwc) 2938 { 2939 if (dwc->pullups_connected) { 2940 dwc3_gadget_disable_irq(dwc); 2941 dwc3_gadget_run_stop(dwc, true, true); 2942 } 2943 2944 __dwc3_gadget_ep_disable(dwc->eps[0]); 2945 __dwc3_gadget_ep_disable(dwc->eps[1]); 2946 2947 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG); 2948 2949 return 0; 2950 } 2951 2952 int dwc3_gadget_resume(struct dwc3 *dwc) 2953 { 2954 struct dwc3_ep *dep; 2955 int ret; 2956 2957 /* Start with SuperSpeed Default */ 2958 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2959 2960 dep = dwc->eps[0]; 2961 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2962 false); 2963 if (ret) 2964 goto err0; 2965 2966 dep = dwc->eps[1]; 2967 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2968 false); 2969 if (ret) 2970 goto err1; 2971 2972 /* begin to receive SETUP packets */ 2973 dwc->ep0state = EP0_SETUP_PHASE; 2974 dwc3_ep0_out_start(dwc); 2975 2976 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg); 2977 2978 if (dwc->pullups_connected) { 2979 dwc3_gadget_enable_irq(dwc); 2980 dwc3_gadget_run_stop(dwc, true, false); 2981 } 2982 2983 return 0; 2984 2985 err1: 2986 __dwc3_gadget_ep_disable(dwc->eps[0]); 2987 2988 err0: 2989 return ret; 2990 } 2991