1 /** 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link 3 * 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com 5 * 6 * Authors: Felipe Balbi <balbi@ti.com>, 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de> 8 * 9 * This program is free software: you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 of 11 * the License as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/list.h> 28 #include <linux/dma-mapping.h> 29 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 33 #include "debug.h" 34 #include "core.h" 35 #include "gadget.h" 36 #include "io.h" 37 38 /** 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes 40 * @dwc: pointer to our context structure 41 * @mode: the mode to set (J, K SE0 NAK, Force Enable) 42 * 43 * Caller should take care of locking. This function will 44 * return 0 on success or -EINVAL if wrong Test Selector 45 * is passed 46 */ 47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) 48 { 49 u32 reg; 50 51 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 52 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 53 54 switch (mode) { 55 case TEST_J: 56 case TEST_K: 57 case TEST_SE0_NAK: 58 case TEST_PACKET: 59 case TEST_FORCE_EN: 60 reg |= mode << 1; 61 break; 62 default: 63 return -EINVAL; 64 } 65 66 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 67 68 return 0; 69 } 70 71 /** 72 * dwc3_gadget_get_link_state - Gets current state of USB Link 73 * @dwc: pointer to our context structure 74 * 75 * Caller should take care of locking. This function will 76 * return the link state on success (>= 0) or -ETIMEDOUT. 77 */ 78 int dwc3_gadget_get_link_state(struct dwc3 *dwc) 79 { 80 u32 reg; 81 82 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 83 84 return DWC3_DSTS_USBLNKST(reg); 85 } 86 87 /** 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State 89 * @dwc: pointer to our context structure 90 * @state: the state to put link into 91 * 92 * Caller should take care of locking. This function will 93 * return 0 on success or -ETIMEDOUT. 94 */ 95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) 96 { 97 int retries = 10000; 98 u32 reg; 99 100 /* 101 * Wait until device controller is ready. Only applies to 1.94a and 102 * later RTL. 
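	 * The poll below is bounded: 10000 retries with a 5us delay per
	 * iteration gives the controller roughly 50ms to clear DCNRD
	 * before we give up and return -ETIMEDOUT.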
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dwc3_trace(trace_dwc3_gadget,
			"link state change request timed out");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will make a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in coreConsultant, and
 * the width of the Master Bus.
 *
 * In an ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint; later patches will improve this
	 * algorithm so that we make better use of the internal
	 * FIFO space.
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
		int		mult = 1;
		int		tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast.
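		 * (For a sense of scale: assuming a 64-bit master bus, i.e.
		 * an mdwidth of 8 bytes, the calculation below reserves
		 * DIV_ROUND_UP(3 * (1024 + 8) + 8, 8) = 388 mdwidth-words
		 * of TxFIFO for a 1024-byte bulk endpoint.)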
So we're giving 213 * those endpoints a fifo_size which is enough for 3 full 214 * packets 215 */ 216 tmp = mult * (dep->endpoint.maxpacket + mdwidth); 217 tmp += mdwidth; 218 219 fifo_size = DIV_ROUND_UP(tmp, mdwidth); 220 221 fifo_size |= (last_fifo_depth << 16); 222 223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d", 224 dep->name, last_fifo_depth, fifo_size & 0xffff); 225 226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); 227 228 last_fifo_depth += (fifo_size & 0xffff); 229 } 230 231 return 0; 232 } 233 234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, 235 int status) 236 { 237 struct dwc3 *dwc = dep->dwc; 238 int i; 239 240 if (req->queued) { 241 i = 0; 242 do { 243 dep->busy_slot++; 244 /* 245 * Skip LINK TRB. We can't use req->trb and check for 246 * DWC3_TRBCTL_LINK_TRB because it points the TRB we 247 * just completed (not the LINK TRB). 248 */ 249 if (((dep->busy_slot & DWC3_TRB_MASK) == 250 DWC3_TRB_NUM- 1) && 251 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 252 dep->busy_slot++; 253 } while(++i < req->request.num_mapped_sgs); 254 req->queued = false; 255 } 256 list_del(&req->list); 257 req->trb = NULL; 258 259 if (req->request.status == -EINPROGRESS) 260 req->request.status = status; 261 262 if (dwc->ep0_bounced && dep->number == 0) 263 dwc->ep0_bounced = false; 264 else 265 usb_gadget_unmap_request(&dwc->gadget, &req->request, 266 req->direction); 267 268 trace_dwc3_gadget_giveback(req); 269 270 spin_unlock(&dwc->lock); 271 usb_gadget_giveback_request(&dep->endpoint, &req->request); 272 spin_lock(&dwc->lock); 273 } 274 275 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) 276 { 277 u32 timeout = 500; 278 u32 reg; 279 280 trace_dwc3_gadget_generic_cmd(cmd, param); 281 282 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param); 283 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT); 284 285 do { 286 reg = dwc3_readl(dwc->regs, DWC3_DGCMD); 287 if (!(reg & DWC3_DGCMD_CMDACT)) { 288 dwc3_trace(trace_dwc3_gadget, 289 "Command Complete --> %d", 290 DWC3_DGCMD_STATUS(reg)); 291 if (DWC3_DGCMD_STATUS(reg)) 292 return -EINVAL; 293 return 0; 294 } 295 296 /* 297 * We can't sleep here, because it's also called from 298 * interrupt context. 299 */ 300 timeout--; 301 if (!timeout) { 302 dwc3_trace(trace_dwc3_gadget, 303 "Command Timed Out"); 304 return -ETIMEDOUT; 305 } 306 udelay(1); 307 } while (1); 308 } 309 310 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, 311 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) 312 { 313 struct dwc3_ep *dep = dwc->eps[ep]; 314 u32 timeout = 500; 315 u32 reg; 316 317 trace_dwc3_gadget_ep_cmd(dep, cmd, params); 318 319 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); 320 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); 321 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); 322 323 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); 324 do { 325 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); 326 if (!(reg & DWC3_DEPCMD_CMDACT)) { 327 dwc3_trace(trace_dwc3_gadget, 328 "Command Complete --> %d", 329 DWC3_DEPCMD_STATUS(reg)); 330 if (DWC3_DEPCMD_STATUS(reg)) 331 return -EINVAL; 332 return 0; 333 } 334 335 /* 336 * We can't sleep here, because it is also called from 337 * interrupt context. 
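		 * Instead we busy-wait on CMDACT: 500 iterations with
		 * udelay(1) bounds the wait at roughly 500us before we
		 * give up with -ETIMEDOUT, mirroring
		 * dwc3_send_gadget_generic_command() above.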
338 */ 339 timeout--; 340 if (!timeout) { 341 dwc3_trace(trace_dwc3_gadget, 342 "Command Timed Out"); 343 return -ETIMEDOUT; 344 } 345 346 udelay(1); 347 } while (1); 348 } 349 350 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 351 struct dwc3_trb *trb) 352 { 353 u32 offset = (char *) trb - (char *) dep->trb_pool; 354 355 return dep->trb_pool_dma + offset; 356 } 357 358 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) 359 { 360 struct dwc3 *dwc = dep->dwc; 361 362 if (dep->trb_pool) 363 return 0; 364 365 dep->trb_pool = dma_alloc_coherent(dwc->dev, 366 sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 367 &dep->trb_pool_dma, GFP_KERNEL); 368 if (!dep->trb_pool) { 369 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", 370 dep->name); 371 return -ENOMEM; 372 } 373 374 return 0; 375 } 376 377 static void dwc3_free_trb_pool(struct dwc3_ep *dep) 378 { 379 struct dwc3 *dwc = dep->dwc; 380 381 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, 382 dep->trb_pool, dep->trb_pool_dma); 383 384 dep->trb_pool = NULL; 385 dep->trb_pool_dma = 0; 386 } 387 388 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); 389 390 /** 391 * dwc3_gadget_start_config - Configure EP resources 392 * @dwc: pointer to our controller context structure 393 * @dep: endpoint that is being enabled 394 * 395 * The assignment of transfer resources cannot perfectly follow the 396 * data book due to the fact that the controller driver does not have 397 * all knowledge of the configuration in advance. It is given this 398 * information piecemeal by the composite gadget framework after every 399 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook 400 * programming model in this scenario can cause errors. For two 401 * reasons: 402 * 403 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION 404 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of 405 * multiple interfaces. 406 * 407 * 2) The databook does not mention doing more DEPXFERCFG for new 408 * endpoint on alt setting (8.1.6). 409 * 410 * The following simplified method is used instead: 411 * 412 * All hardware endpoints can be assigned a transfer resource and this 413 * setting will stay persistent until either a core reset or 414 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and 415 * do DEPXFERCFG for every hardware endpoint as well. We are 416 * guaranteed that there are as many transfer resources as endpoints. 417 * 418 * This function is called for each endpoint when it is being enabled 419 * but is triggered only when called for EP0-out, which always happens 420 * first, and which should only happen in one of the above conditions. 
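 *
 * Concretely, the sequence issued from here when EP0-out is enabled
 * looks roughly like this (illustrative, not an additional programming
 * requirement):
 *
 *	DEPSTARTCFG			-> issued once, to physical endpoint 0
 *	DEPXFERCFG (1 xfer resource)	-> issued to every allocated endpoint
 *
 * and nothing more is needed until the next core reset or hibernation.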
421 */ 422 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 423 { 424 struct dwc3_gadget_ep_cmd_params params; 425 u32 cmd; 426 int i; 427 int ret; 428 429 if (dep->number) 430 return 0; 431 432 memset(¶ms, 0x00, sizeof(params)); 433 cmd = DWC3_DEPCMD_DEPSTARTCFG; 434 435 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); 436 if (ret) 437 return ret; 438 439 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 440 struct dwc3_ep *dep = dwc->eps[i]; 441 442 if (!dep) 443 continue; 444 445 ret = dwc3_gadget_set_xfer_resource(dwc, dep); 446 if (ret) 447 return ret; 448 } 449 450 return 0; 451 } 452 453 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, 454 const struct usb_endpoint_descriptor *desc, 455 const struct usb_ss_ep_comp_descriptor *comp_desc, 456 bool ignore, bool restore) 457 { 458 struct dwc3_gadget_ep_cmd_params params; 459 460 memset(¶ms, 0x00, sizeof(params)); 461 462 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) 463 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); 464 465 /* Burst size is only needed in SuperSpeed mode */ 466 if (dwc->gadget.speed >= USB_SPEED_SUPER) { 467 u32 burst = dep->endpoint.maxburst - 1; 468 469 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); 470 } 471 472 if (ignore) 473 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM; 474 475 if (restore) { 476 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE; 477 params.param2 |= dep->saved_state; 478 } 479 480 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN 481 | DWC3_DEPCFG_XFER_NOT_READY_EN; 482 483 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 484 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE 485 | DWC3_DEPCFG_STREAM_EVENT_EN; 486 dep->stream_capable = true; 487 } 488 489 if (!usb_endpoint_xfer_control(desc)) 490 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; 491 492 /* 493 * We are doing 1:1 mapping for endpoints, meaning 494 * Physical Endpoints 2 maps to Logical Endpoint 2 and 495 * so on. We consider the direction bit as part of the physical 496 * endpoint number. So USB endpoint 0x81 is 0x03. 
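	 * A couple more examples of that mapping, for illustration:
	 *
	 *	USB ep 0x01 (OUT 1) -> physical ep (1 << 1) | 0 = 2
	 *	USB ep 0x82 (IN  2) -> physical ep (2 << 1) | 1 = 5
	 *
	 * which is the same encoding used when the endpoints are created
	 * in dwc3_gadget_init_hw_endpoints().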
497 */ 498 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); 499 500 /* 501 * We must use the lower 16 TX FIFOs even though 502 * HW might have more 503 */ 504 if (dep->direction) 505 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); 506 507 if (desc->bInterval) { 508 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); 509 dep->interval = 1 << (desc->bInterval - 1); 510 } 511 512 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 513 DWC3_DEPCMD_SETEPCONFIG, ¶ms); 514 } 515 516 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) 517 { 518 struct dwc3_gadget_ep_cmd_params params; 519 520 memset(¶ms, 0x00, sizeof(params)); 521 522 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); 523 524 return dwc3_send_gadget_ep_cmd(dwc, dep->number, 525 DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); 526 } 527 528 /** 529 * __dwc3_gadget_ep_enable - Initializes a HW endpoint 530 * @dep: endpoint to be initialized 531 * @desc: USB Endpoint Descriptor 532 * 533 * Caller should take care of locking 534 */ 535 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, 536 const struct usb_endpoint_descriptor *desc, 537 const struct usb_ss_ep_comp_descriptor *comp_desc, 538 bool ignore, bool restore) 539 { 540 struct dwc3 *dwc = dep->dwc; 541 u32 reg; 542 int ret; 543 544 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); 545 546 if (!(dep->flags & DWC3_EP_ENABLED)) { 547 ret = dwc3_gadget_start_config(dwc, dep); 548 if (ret) 549 return ret; 550 } 551 552 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore, 553 restore); 554 if (ret) 555 return ret; 556 557 if (!(dep->flags & DWC3_EP_ENABLED)) { 558 struct dwc3_trb *trb_st_hw; 559 struct dwc3_trb *trb_link; 560 561 dep->endpoint.desc = desc; 562 dep->comp_desc = comp_desc; 563 dep->type = usb_endpoint_type(desc); 564 dep->flags |= DWC3_EP_ENABLED; 565 566 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 567 reg |= DWC3_DALEPENA_EP(dep->number); 568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 569 570 if (!usb_endpoint_xfer_isoc(desc)) 571 return 0; 572 573 /* Link TRB for ISOC. 
The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* give back all requests to the gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently being processed by
 * the hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ?
"in" : "out"); 662 663 return 0; 664 } 665 666 /* -------------------------------------------------------------------------- */ 667 668 static int dwc3_gadget_ep0_enable(struct usb_ep *ep, 669 const struct usb_endpoint_descriptor *desc) 670 { 671 return -EINVAL; 672 } 673 674 static int dwc3_gadget_ep0_disable(struct usb_ep *ep) 675 { 676 return -EINVAL; 677 } 678 679 /* -------------------------------------------------------------------------- */ 680 681 static int dwc3_gadget_ep_enable(struct usb_ep *ep, 682 const struct usb_endpoint_descriptor *desc) 683 { 684 struct dwc3_ep *dep; 685 struct dwc3 *dwc; 686 unsigned long flags; 687 int ret; 688 689 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 690 pr_debug("dwc3: invalid parameters\n"); 691 return -EINVAL; 692 } 693 694 if (!desc->wMaxPacketSize) { 695 pr_debug("dwc3: missing wMaxPacketSize\n"); 696 return -EINVAL; 697 } 698 699 dep = to_dwc3_ep(ep); 700 dwc = dep->dwc; 701 702 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED, 703 "%s is already enabled\n", 704 dep->name)) 705 return 0; 706 707 spin_lock_irqsave(&dwc->lock, flags); 708 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); 709 spin_unlock_irqrestore(&dwc->lock, flags); 710 711 return ret; 712 } 713 714 static int dwc3_gadget_ep_disable(struct usb_ep *ep) 715 { 716 struct dwc3_ep *dep; 717 struct dwc3 *dwc; 718 unsigned long flags; 719 int ret; 720 721 if (!ep) { 722 pr_debug("dwc3: invalid parameters\n"); 723 return -EINVAL; 724 } 725 726 dep = to_dwc3_ep(ep); 727 dwc = dep->dwc; 728 729 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED), 730 "%s is already disabled\n", 731 dep->name)) 732 return 0; 733 734 spin_lock_irqsave(&dwc->lock, flags); 735 ret = __dwc3_gadget_ep_disable(dep); 736 spin_unlock_irqrestore(&dwc->lock, flags); 737 738 return ret; 739 } 740 741 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, 742 gfp_t gfp_flags) 743 { 744 struct dwc3_request *req; 745 struct dwc3_ep *dep = to_dwc3_ep(ep); 746 747 req = kzalloc(sizeof(*req), gfp_flags); 748 if (!req) 749 return NULL; 750 751 req->epnum = dep->number; 752 req->dep = dep; 753 754 trace_dwc3_alloc_request(req); 755 756 return &req->request; 757 } 758 759 static void dwc3_gadget_ep_free_request(struct usb_ep *ep, 760 struct usb_request *request) 761 { 762 struct dwc3_request *req = to_dwc3_request(request); 763 764 trace_dwc3_free_request(req); 765 kfree(req); 766 } 767 768 /** 769 * dwc3_prepare_one_trb - setup one TRB from one request 770 * @dep: endpoint for which this request is prepared 771 * @req: dwc3_request pointer 772 */ 773 static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 774 struct dwc3_request *req, dma_addr_t dma, 775 unsigned length, unsigned last, unsigned chain, unsigned node) 776 { 777 struct dwc3_trb *trb; 778 779 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s", 780 dep->name, req, (unsigned long long) dma, 781 length, last ? " last" : "", 782 chain ? 
" chain" : ""); 783 784 785 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 786 787 if (!req->trb) { 788 dwc3_gadget_move_request_queued(req); 789 req->trb = trb; 790 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 791 req->start_slot = dep->free_slot & DWC3_TRB_MASK; 792 } 793 794 dep->free_slot++; 795 /* Skip the LINK-TRB on ISOC */ 796 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && 797 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 798 dep->free_slot++; 799 800 trb->size = DWC3_TRB_SIZE_LENGTH(length); 801 trb->bpl = lower_32_bits(dma); 802 trb->bph = upper_32_bits(dma); 803 804 switch (usb_endpoint_type(dep->endpoint.desc)) { 805 case USB_ENDPOINT_XFER_CONTROL: 806 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP; 807 break; 808 809 case USB_ENDPOINT_XFER_ISOC: 810 if (!node) 811 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 812 else 813 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 814 break; 815 816 case USB_ENDPOINT_XFER_BULK: 817 case USB_ENDPOINT_XFER_INT: 818 trb->ctrl = DWC3_TRBCTL_NORMAL; 819 break; 820 default: 821 /* 822 * This is only possible with faulty memory because we 823 * checked it already :) 824 */ 825 BUG(); 826 } 827 828 if (!req->request.no_interrupt && !chain) 829 trb->ctrl |= DWC3_TRB_CTRL_IOC; 830 831 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 832 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 833 trb->ctrl |= DWC3_TRB_CTRL_CSP; 834 } else if (last) { 835 trb->ctrl |= DWC3_TRB_CTRL_LST; 836 } 837 838 if (chain) 839 trb->ctrl |= DWC3_TRB_CTRL_CHN; 840 841 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 842 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 843 844 trb->ctrl |= DWC3_TRB_CTRL_HWO; 845 846 trace_dwc3_prepare_trb(dep, trb); 847 } 848 849 /* 850 * dwc3_prepare_trbs - setup TRBs from requests 851 * @dep: endpoint for which requests are being prepared 852 * @starting: true if the endpoint is idle and no requests are queued. 853 * 854 * The function goes through the requests list and sets up TRBs for the 855 * transfers. The function returns once there are no more TRBs available or 856 * it runs out of requests. 857 */ 858 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) 859 { 860 struct dwc3_request *req, *n; 861 u32 trbs_left; 862 u32 max; 863 unsigned int last_one = 0; 864 865 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); 866 867 /* the first request must not be queued */ 868 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; 869 870 /* Can't wrap around on a non-isoc EP since there's no link TRB */ 871 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 872 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK); 873 if (trbs_left > max) 874 trbs_left = max; 875 } 876 877 /* 878 * If busy & slot are equal than it is either full or empty. If we are 879 * starting to process requests then we are empty. Otherwise we are 880 * full and don't do anything 881 */ 882 if (!trbs_left) { 883 if (!starting) 884 return; 885 trbs_left = DWC3_TRB_NUM; 886 /* 887 * In case we start from scratch, we queue the ISOC requests 888 * starting from slot 1. This is done because we use ring 889 * buffer and have no LST bit to stop us. Instead, we place 890 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt 891 * after the first request so we start at slot 1 and have 892 * 7 requests proceed before we hit the first IOC. 893 * Other transfer types don't use the ring buffer and are 894 * processed from the first TRB until the last one. Since we 895 * don't wrap around we have to start at the beginning. 
896 */ 897 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 898 dep->busy_slot = 1; 899 dep->free_slot = 1; 900 } else { 901 dep->busy_slot = 0; 902 dep->free_slot = 0; 903 } 904 } 905 906 /* The last TRB is a link TRB, not used for xfer */ 907 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc)) 908 return; 909 910 list_for_each_entry_safe(req, n, &dep->request_list, list) { 911 unsigned length; 912 dma_addr_t dma; 913 last_one = false; 914 915 if (req->request.num_mapped_sgs > 0) { 916 struct usb_request *request = &req->request; 917 struct scatterlist *sg = request->sg; 918 struct scatterlist *s; 919 int i; 920 921 for_each_sg(sg, s, request->num_mapped_sgs, i) { 922 unsigned chain = true; 923 924 length = sg_dma_len(s); 925 dma = sg_dma_address(s); 926 927 if (i == (request->num_mapped_sgs - 1) || 928 sg_is_last(s)) { 929 if (list_empty(&dep->request_list)) 930 last_one = true; 931 chain = false; 932 } 933 934 trbs_left--; 935 if (!trbs_left) 936 last_one = true; 937 938 if (last_one) 939 chain = false; 940 941 dwc3_prepare_one_trb(dep, req, dma, length, 942 last_one, chain, i); 943 944 if (last_one) 945 break; 946 } 947 948 if (last_one) 949 break; 950 } else { 951 dma = req->request.dma; 952 length = req->request.length; 953 trbs_left--; 954 955 if (!trbs_left) 956 last_one = 1; 957 958 /* Is this the last request? */ 959 if (list_is_last(&req->list, &dep->request_list)) 960 last_one = 1; 961 962 dwc3_prepare_one_trb(dep, req, dma, length, 963 last_one, false, 0); 964 965 if (last_one) 966 break; 967 } 968 } 969 } 970 971 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, 972 int start_new) 973 { 974 struct dwc3_gadget_ep_cmd_params params; 975 struct dwc3_request *req; 976 struct dwc3 *dwc = dep->dwc; 977 int ret; 978 u32 cmd; 979 980 if (start_new && (dep->flags & DWC3_EP_BUSY)) { 981 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name); 982 return -EBUSY; 983 } 984 985 /* 986 * If we are getting here after a short-out-packet we don't enqueue any 987 * new requests as we try to set the IOC bit only on the last request. 988 */ 989 if (start_new) { 990 if (list_empty(&dep->req_queued)) 991 dwc3_prepare_trbs(dep, start_new); 992 993 /* req points to the first request which will be sent */ 994 req = next_request(&dep->req_queued); 995 } else { 996 dwc3_prepare_trbs(dep, start_new); 997 998 /* 999 * req points to the first request where HWO changed from 0 to 1 1000 */ 1001 req = next_request(&dep->req_queued); 1002 } 1003 if (!req) { 1004 dep->flags |= DWC3_EP_PENDING_REQUEST; 1005 return 0; 1006 } 1007 1008 memset(¶ms, 0, sizeof(params)); 1009 1010 if (start_new) { 1011 params.param0 = upper_32_bits(req->trb_dma); 1012 params.param1 = lower_32_bits(req->trb_dma); 1013 cmd = DWC3_DEPCMD_STARTTRANSFER; 1014 } else { 1015 cmd = DWC3_DEPCMD_UPDATETRANSFER; 1016 } 1017 1018 cmd |= DWC3_DEPCMD_PARAM(cmd_param); 1019 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 1020 if (ret < 0) { 1021 /* 1022 * FIXME we need to iterate over the list of requests 1023 * here and stop, unmap, free and del each of the linked 1024 * requests instead of what we do now. 
1025 */ 1026 usb_gadget_unmap_request(&dwc->gadget, &req->request, 1027 req->direction); 1028 list_del(&req->list); 1029 return ret; 1030 } 1031 1032 dep->flags |= DWC3_EP_BUSY; 1033 1034 if (start_new) { 1035 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc, 1036 dep->number); 1037 WARN_ON_ONCE(!dep->resource_index); 1038 } 1039 1040 return 0; 1041 } 1042 1043 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, 1044 struct dwc3_ep *dep, u32 cur_uf) 1045 { 1046 u32 uf; 1047 1048 if (list_empty(&dep->request_list)) { 1049 dwc3_trace(trace_dwc3_gadget, 1050 "ISOC ep %s run out for requests", 1051 dep->name); 1052 dep->flags |= DWC3_EP_PENDING_REQUEST; 1053 return; 1054 } 1055 1056 /* 4 micro frames in the future */ 1057 uf = cur_uf + dep->interval * 4; 1058 1059 __dwc3_gadget_kick_transfer(dep, uf, 1); 1060 } 1061 1062 static void dwc3_gadget_start_isoc(struct dwc3 *dwc, 1063 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1064 { 1065 u32 cur_uf, mask; 1066 1067 mask = ~(dep->interval - 1); 1068 cur_uf = event->parameters & mask; 1069 1070 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1071 } 1072 1073 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) 1074 { 1075 struct dwc3 *dwc = dep->dwc; 1076 int ret; 1077 1078 if (!dep->endpoint.desc) { 1079 dwc3_trace(trace_dwc3_gadget, 1080 "trying to queue request %p to disabled %s\n", 1081 &req->request, dep->endpoint.name); 1082 return -ESHUTDOWN; 1083 } 1084 1085 if (WARN(req->dep != dep, "request %p belongs to '%s'\n", 1086 &req->request, req->dep->name)) { 1087 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n", 1088 &req->request, req->dep->name); 1089 return -EINVAL; 1090 } 1091 1092 req->request.actual = 0; 1093 req->request.status = -EINPROGRESS; 1094 req->direction = dep->direction; 1095 req->epnum = dep->number; 1096 1097 trace_dwc3_ep_queue(req); 1098 1099 /* 1100 * We only add to our list of requests now and 1101 * start consuming the list once we get XferNotReady 1102 * IRQ. 1103 * 1104 * That way, we avoid doing anything that we don't need 1105 * to do now and defer it until the point we receive a 1106 * particular token from the Host side. 1107 * 1108 * This will also avoid Host cancelling URBs due to too 1109 * many NAKs. 1110 */ 1111 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 1112 dep->direction); 1113 if (ret) 1114 return ret; 1115 1116 list_add_tail(&req->list, &dep->request_list); 1117 1118 /* 1119 * If there are no pending requests and the endpoint isn't already 1120 * busy, we will just start the request straight away. 1121 * 1122 * This will save one IRQ (XFER_NOT_READY) and possibly make it a 1123 * little bit faster. 1124 */ 1125 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1126 !usb_endpoint_xfer_int(dep->endpoint.desc) && 1127 !(dep->flags & DWC3_EP_BUSY)) { 1128 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1129 goto out; 1130 } 1131 1132 /* 1133 * There are a few special cases: 1134 * 1135 * 1. XferNotReady with empty list of requests. We need to kick the 1136 * transfer here in that situation, otherwise we will be NAKing 1137 * forever. If we get XferNotReady before gadget driver has a 1138 * chance to queue a request, we will ACK the IRQ but won't be 1139 * able to receive the data until the next request is queued. 1140 * The following code is handling exactly that. 
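	 *
	 *    As a concrete example: a bulk OUT function that has not
	 *    queued a buffer yet when the host starts sending data will
	 *    see XferNotReady with an empty request_list; the kick in
	 *    __dwc3_gadget_kick_transfer() then finds no request, sets
	 *    DWC3_EP_PENDING_REQUEST, and the code below finally starts
	 *    the transfer once the gadget driver queues its request.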
1141 * 1142 */ 1143 if (dep->flags & DWC3_EP_PENDING_REQUEST) { 1144 /* 1145 * If xfernotready is already elapsed and it is a case 1146 * of isoc transfer, then issue END TRANSFER, so that 1147 * you can receive xfernotready again and can have 1148 * notion of current microframe. 1149 */ 1150 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1151 if (list_empty(&dep->req_queued)) { 1152 dwc3_stop_active_transfer(dwc, dep->number, true); 1153 dep->flags = DWC3_EP_ENABLED; 1154 } 1155 return 0; 1156 } 1157 1158 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1159 if (!ret) 1160 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1161 1162 goto out; 1163 } 1164 1165 /* 1166 * 2. XferInProgress on Isoc EP with an active transfer. We need to 1167 * kick the transfer here after queuing a request, otherwise the 1168 * core may not see the modified TRB(s). 1169 */ 1170 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1171 (dep->flags & DWC3_EP_BUSY) && 1172 !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1173 WARN_ON_ONCE(!dep->resource_index); 1174 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index, 1175 false); 1176 goto out; 1177 } 1178 1179 /* 1180 * 4. Stream Capable Bulk Endpoints. We need to start the transfer 1181 * right away, otherwise host will not know we have streams to be 1182 * handled. 1183 */ 1184 if (dep->stream_capable) 1185 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1186 1187 out: 1188 if (ret && ret != -EBUSY) 1189 dwc3_trace(trace_dwc3_gadget, 1190 "%s: failed to kick transfers\n", 1191 dep->name); 1192 if (ret == -EBUSY) 1193 ret = 0; 1194 1195 return ret; 1196 } 1197 1198 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep, 1199 struct usb_request *request) 1200 { 1201 dwc3_gadget_ep_free_request(ep, request); 1202 } 1203 1204 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) 1205 { 1206 struct dwc3_request *req; 1207 struct usb_request *request; 1208 struct usb_ep *ep = &dep->endpoint; 1209 1210 dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n"); 1211 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 1212 if (!request) 1213 return -ENOMEM; 1214 1215 request->length = 0; 1216 request->buf = dwc->zlp_buf; 1217 request->complete = __dwc3_gadget_ep_zlp_complete; 1218 1219 req = to_dwc3_request(request); 1220 1221 return __dwc3_gadget_ep_queue(dep, req); 1222 } 1223 1224 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 1225 gfp_t gfp_flags) 1226 { 1227 struct dwc3_request *req = to_dwc3_request(request); 1228 struct dwc3_ep *dep = to_dwc3_ep(ep); 1229 struct dwc3 *dwc = dep->dwc; 1230 1231 unsigned long flags; 1232 1233 int ret; 1234 1235 spin_lock_irqsave(&dwc->lock, flags); 1236 ret = __dwc3_gadget_ep_queue(dep, req); 1237 1238 /* 1239 * Okay, here's the thing, if gadget driver has requested for a ZLP by 1240 * setting request->zero, instead of doing magic, we will just queue an 1241 * extra usb_request ourselves so that it gets handled the same way as 1242 * any other request. 
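	 *
	 * For example, if a function driver queues a 1024-byte request
	 * with request->zero set on a bulk endpoint whose maxpacket is
	 * 512, the transfer ends exactly on a packet boundary, so the
	 * check below queues one extra zero-length request (backed by
	 * dwc->zlp_buf) to terminate it.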
1243 */ 1244 if (ret == 0 && request->zero && request->length && 1245 (request->length % ep->maxpacket == 0)) 1246 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep); 1247 1248 spin_unlock_irqrestore(&dwc->lock, flags); 1249 1250 return ret; 1251 } 1252 1253 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, 1254 struct usb_request *request) 1255 { 1256 struct dwc3_request *req = to_dwc3_request(request); 1257 struct dwc3_request *r = NULL; 1258 1259 struct dwc3_ep *dep = to_dwc3_ep(ep); 1260 struct dwc3 *dwc = dep->dwc; 1261 1262 unsigned long flags; 1263 int ret = 0; 1264 1265 trace_dwc3_ep_dequeue(req); 1266 1267 spin_lock_irqsave(&dwc->lock, flags); 1268 1269 list_for_each_entry(r, &dep->request_list, list) { 1270 if (r == req) 1271 break; 1272 } 1273 1274 if (r != req) { 1275 list_for_each_entry(r, &dep->req_queued, list) { 1276 if (r == req) 1277 break; 1278 } 1279 if (r == req) { 1280 /* wait until it is processed */ 1281 dwc3_stop_active_transfer(dwc, dep->number, true); 1282 goto out1; 1283 } 1284 dev_err(dwc->dev, "request %p was not queued to %s\n", 1285 request, ep->name); 1286 ret = -EINVAL; 1287 goto out0; 1288 } 1289 1290 out1: 1291 /* giveback the request */ 1292 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1293 1294 out0: 1295 spin_unlock_irqrestore(&dwc->lock, flags); 1296 1297 return ret; 1298 } 1299 1300 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) 1301 { 1302 struct dwc3_gadget_ep_cmd_params params; 1303 struct dwc3 *dwc = dep->dwc; 1304 int ret; 1305 1306 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1307 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); 1308 return -EINVAL; 1309 } 1310 1311 memset(¶ms, 0x00, sizeof(params)); 1312 1313 if (value) { 1314 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || 1315 (!list_empty(&dep->req_queued) || 1316 !list_empty(&dep->request_list)))) { 1317 dwc3_trace(trace_dwc3_gadget, 1318 "%s: pending request, cannot halt\n", 1319 dep->name); 1320 return -EAGAIN; 1321 } 1322 1323 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1324 DWC3_DEPCMD_SETSTALL, ¶ms); 1325 if (ret) 1326 dev_err(dwc->dev, "failed to set STALL on %s\n", 1327 dep->name); 1328 else 1329 dep->flags |= DWC3_EP_STALL; 1330 } else { 1331 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1332 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1333 if (ret) 1334 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1335 dep->name); 1336 else 1337 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1338 } 1339 1340 return ret; 1341 } 1342 1343 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1344 { 1345 struct dwc3_ep *dep = to_dwc3_ep(ep); 1346 struct dwc3 *dwc = dep->dwc; 1347 1348 unsigned long flags; 1349 1350 int ret; 1351 1352 spin_lock_irqsave(&dwc->lock, flags); 1353 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1354 spin_unlock_irqrestore(&dwc->lock, flags); 1355 1356 return ret; 1357 } 1358 1359 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1360 { 1361 struct dwc3_ep *dep = to_dwc3_ep(ep); 1362 struct dwc3 *dwc = dep->dwc; 1363 unsigned long flags; 1364 int ret; 1365 1366 spin_lock_irqsave(&dwc->lock, flags); 1367 dep->flags |= DWC3_EP_WEDGE; 1368 1369 if (dep->number == 0 || dep->number == 1) 1370 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1371 else 1372 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1373 spin_unlock_irqrestore(&dwc->lock, flags); 1374 1375 return ret; 1376 } 1377 1378 /* -------------------------------------------------------------------------- */ 1379 1380 static struct 
usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1381 .bLength = USB_DT_ENDPOINT_SIZE, 1382 .bDescriptorType = USB_DT_ENDPOINT, 1383 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1384 }; 1385 1386 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1387 .enable = dwc3_gadget_ep0_enable, 1388 .disable = dwc3_gadget_ep0_disable, 1389 .alloc_request = dwc3_gadget_ep_alloc_request, 1390 .free_request = dwc3_gadget_ep_free_request, 1391 .queue = dwc3_gadget_ep0_queue, 1392 .dequeue = dwc3_gadget_ep_dequeue, 1393 .set_halt = dwc3_gadget_ep0_set_halt, 1394 .set_wedge = dwc3_gadget_ep_set_wedge, 1395 }; 1396 1397 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1398 .enable = dwc3_gadget_ep_enable, 1399 .disable = dwc3_gadget_ep_disable, 1400 .alloc_request = dwc3_gadget_ep_alloc_request, 1401 .free_request = dwc3_gadget_ep_free_request, 1402 .queue = dwc3_gadget_ep_queue, 1403 .dequeue = dwc3_gadget_ep_dequeue, 1404 .set_halt = dwc3_gadget_ep_set_halt, 1405 .set_wedge = dwc3_gadget_ep_set_wedge, 1406 }; 1407 1408 /* -------------------------------------------------------------------------- */ 1409 1410 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1411 { 1412 struct dwc3 *dwc = gadget_to_dwc(g); 1413 u32 reg; 1414 1415 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1416 return DWC3_DSTS_SOFFN(reg); 1417 } 1418 1419 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1420 { 1421 struct dwc3 *dwc = gadget_to_dwc(g); 1422 1423 unsigned long timeout; 1424 unsigned long flags; 1425 1426 u32 reg; 1427 1428 int ret = 0; 1429 1430 u8 link_state; 1431 u8 speed; 1432 1433 spin_lock_irqsave(&dwc->lock, flags); 1434 1435 /* 1436 * According to the Databook Remote wakeup request should 1437 * be issued only when the device is in early suspend state. 1438 * 1439 * We can check that via USB Link State bits in DSTS register. 
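	 * The sequence below is therefore: reject the request while the
	 * link is running at SuperSpeed (or faster), accept it only from
	 * U3 or RX_DET (SUSPEND/Early Suspend in high-speed terms),
	 * request a transition to Recovery, and then poll DSTS for up to
	 * 100ms for the link to come back to U0.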
1440 */ 1441 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1442 1443 speed = reg & DWC3_DSTS_CONNECTSPD; 1444 if ((speed == DWC3_DSTS_SUPERSPEED) || 1445 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { 1446 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); 1447 ret = -EINVAL; 1448 goto out; 1449 } 1450 1451 link_state = DWC3_DSTS_USBLNKST(reg); 1452 1453 switch (link_state) { 1454 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1455 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1456 break; 1457 default: 1458 dwc3_trace(trace_dwc3_gadget, 1459 "can't wakeup from '%s'\n", 1460 dwc3_gadget_link_string(link_state)); 1461 ret = -EINVAL; 1462 goto out; 1463 } 1464 1465 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1466 if (ret < 0) { 1467 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1468 goto out; 1469 } 1470 1471 /* Recent versions do this automatically */ 1472 if (dwc->revision < DWC3_REVISION_194A) { 1473 /* write zeroes to Link Change Request */ 1474 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1475 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1476 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1477 } 1478 1479 /* poll until Link State changes to ON */ 1480 timeout = jiffies + msecs_to_jiffies(100); 1481 1482 while (!time_after(jiffies, timeout)) { 1483 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1484 1485 /* in HS, means ON */ 1486 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1487 break; 1488 } 1489 1490 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1491 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1492 ret = -EINVAL; 1493 } 1494 1495 out: 1496 spin_unlock_irqrestore(&dwc->lock, flags); 1497 1498 return ret; 1499 } 1500 1501 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1502 int is_selfpowered) 1503 { 1504 struct dwc3 *dwc = gadget_to_dwc(g); 1505 unsigned long flags; 1506 1507 spin_lock_irqsave(&dwc->lock, flags); 1508 g->is_selfpowered = !!is_selfpowered; 1509 spin_unlock_irqrestore(&dwc->lock, flags); 1510 1511 return 0; 1512 } 1513 1514 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1515 { 1516 u32 reg; 1517 u32 timeout = 500; 1518 1519 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1520 if (is_on) { 1521 if (dwc->revision <= DWC3_REVISION_187A) { 1522 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1523 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1524 } 1525 1526 if (dwc->revision >= DWC3_REVISION_194A) 1527 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1528 reg |= DWC3_DCTL_RUN_STOP; 1529 1530 if (dwc->has_hibernation) 1531 reg |= DWC3_DCTL_KEEP_CONNECT; 1532 1533 dwc->pullups_connected = true; 1534 } else { 1535 reg &= ~DWC3_DCTL_RUN_STOP; 1536 1537 if (dwc->has_hibernation && !suspend) 1538 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1539 1540 dwc->pullups_connected = false; 1541 } 1542 1543 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1544 1545 do { 1546 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1547 if (is_on) { 1548 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1549 break; 1550 } else { 1551 if (reg & DWC3_DSTS_DEVCTRLHLT) 1552 break; 1553 } 1554 timeout--; 1555 if (!timeout) 1556 return -ETIMEDOUT; 1557 udelay(1); 1558 } while (1); 1559 1560 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1561 dwc->gadget_driver 1562 ? dwc->gadget_driver->function : "no-function", 1563 is_on ? 
"connect" : "disconnect"); 1564 1565 return 0; 1566 } 1567 1568 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1569 { 1570 struct dwc3 *dwc = gadget_to_dwc(g); 1571 unsigned long flags; 1572 int ret; 1573 1574 is_on = !!is_on; 1575 1576 spin_lock_irqsave(&dwc->lock, flags); 1577 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1578 spin_unlock_irqrestore(&dwc->lock, flags); 1579 1580 return ret; 1581 } 1582 1583 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1584 { 1585 u32 reg; 1586 1587 /* Enable all but Start and End of Frame IRQs */ 1588 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1589 DWC3_DEVTEN_EVNTOVERFLOWEN | 1590 DWC3_DEVTEN_CMDCMPLTEN | 1591 DWC3_DEVTEN_ERRTICERREN | 1592 DWC3_DEVTEN_WKUPEVTEN | 1593 DWC3_DEVTEN_ULSTCNGEN | 1594 DWC3_DEVTEN_CONNECTDONEEN | 1595 DWC3_DEVTEN_USBRSTEN | 1596 DWC3_DEVTEN_DISCONNEVTEN); 1597 1598 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1599 } 1600 1601 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1602 { 1603 /* mask all interrupts */ 1604 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1605 } 1606 1607 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1608 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1609 1610 static int dwc3_gadget_start(struct usb_gadget *g, 1611 struct usb_gadget_driver *driver) 1612 { 1613 struct dwc3 *dwc = gadget_to_dwc(g); 1614 struct dwc3_ep *dep; 1615 unsigned long flags; 1616 int ret = 0; 1617 int irq; 1618 u32 reg; 1619 1620 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1621 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1622 IRQF_SHARED, "dwc3", dwc); 1623 if (ret) { 1624 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1625 irq, ret); 1626 goto err0; 1627 } 1628 1629 spin_lock_irqsave(&dwc->lock, flags); 1630 1631 if (dwc->gadget_driver) { 1632 dev_err(dwc->dev, "%s is already bound to %s\n", 1633 dwc->gadget.name, 1634 dwc->gadget_driver->driver.name); 1635 ret = -EBUSY; 1636 goto err1; 1637 } 1638 1639 dwc->gadget_driver = driver; 1640 1641 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1642 reg &= ~(DWC3_DCFG_SPEED_MASK); 1643 1644 /** 1645 * WORKAROUND: DWC3 revision < 2.20a have an issue 1646 * which would cause metastability state on Run/Stop 1647 * bit if we try to force the IP to USB2-only mode. 
1648 * 1649 * Because of that, we cannot configure the IP to any 1650 * speed other than the SuperSpeed 1651 * 1652 * Refers to: 1653 * 1654 * STAR#9000525659: Clock Domain Crossing on DCTL in 1655 * USB 2.0 Mode 1656 */ 1657 if (dwc->revision < DWC3_REVISION_220A) { 1658 reg |= DWC3_DCFG_SUPERSPEED; 1659 } else { 1660 switch (dwc->maximum_speed) { 1661 case USB_SPEED_LOW: 1662 reg |= DWC3_DSTS_LOWSPEED; 1663 break; 1664 case USB_SPEED_FULL: 1665 reg |= DWC3_DSTS_FULLSPEED1; 1666 break; 1667 case USB_SPEED_HIGH: 1668 reg |= DWC3_DSTS_HIGHSPEED; 1669 break; 1670 case USB_SPEED_SUPER_PLUS: 1671 reg |= DWC3_DSTS_SUPERSPEED_PLUS; 1672 break; 1673 default: 1674 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", 1675 dwc->maximum_speed); 1676 /* fall through */ 1677 case USB_SPEED_SUPER: 1678 reg |= DWC3_DCFG_SUPERSPEED; 1679 break; 1680 } 1681 } 1682 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1683 1684 /* Start with SuperSpeed Default */ 1685 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1686 1687 dep = dwc->eps[0]; 1688 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1689 false); 1690 if (ret) { 1691 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1692 goto err2; 1693 } 1694 1695 dep = dwc->eps[1]; 1696 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1697 false); 1698 if (ret) { 1699 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1700 goto err3; 1701 } 1702 1703 /* begin to receive SETUP packets */ 1704 dwc->ep0state = EP0_SETUP_PHASE; 1705 dwc3_ep0_out_start(dwc); 1706 1707 dwc3_gadget_enable_irq(dwc); 1708 1709 spin_unlock_irqrestore(&dwc->lock, flags); 1710 1711 return 0; 1712 1713 err3: 1714 __dwc3_gadget_ep_disable(dwc->eps[0]); 1715 1716 err2: 1717 dwc->gadget_driver = NULL; 1718 1719 err1: 1720 spin_unlock_irqrestore(&dwc->lock, flags); 1721 1722 free_irq(irq, dwc); 1723 1724 err0: 1725 return ret; 1726 } 1727 1728 static int dwc3_gadget_stop(struct usb_gadget *g) 1729 { 1730 struct dwc3 *dwc = gadget_to_dwc(g); 1731 unsigned long flags; 1732 int irq; 1733 1734 spin_lock_irqsave(&dwc->lock, flags); 1735 1736 dwc3_gadget_disable_irq(dwc); 1737 __dwc3_gadget_ep_disable(dwc->eps[0]); 1738 __dwc3_gadget_ep_disable(dwc->eps[1]); 1739 1740 dwc->gadget_driver = NULL; 1741 1742 spin_unlock_irqrestore(&dwc->lock, flags); 1743 1744 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1745 free_irq(irq, dwc); 1746 1747 return 0; 1748 } 1749 1750 static const struct usb_gadget_ops dwc3_gadget_ops = { 1751 .get_frame = dwc3_gadget_get_frame, 1752 .wakeup = dwc3_gadget_wakeup, 1753 .set_selfpowered = dwc3_gadget_set_selfpowered, 1754 .pullup = dwc3_gadget_pullup, 1755 .udc_start = dwc3_gadget_start, 1756 .udc_stop = dwc3_gadget_stop, 1757 }; 1758 1759 /* -------------------------------------------------------------------------- */ 1760 1761 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1762 u8 num, u32 direction) 1763 { 1764 struct dwc3_ep *dep; 1765 u8 i; 1766 1767 for (i = 0; i < num; i++) { 1768 u8 epnum = (i << 1) | (!!direction); 1769 1770 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1771 if (!dep) 1772 return -ENOMEM; 1773 1774 dep->dwc = dwc; 1775 dep->number = epnum; 1776 dep->direction = !!direction; 1777 dwc->eps[epnum] = dep; 1778 1779 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1780 (epnum & 1) ? 
"in" : "out"); 1781 1782 dep->endpoint.name = dep->name; 1783 1784 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1785 1786 if (epnum == 0 || epnum == 1) { 1787 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1788 dep->endpoint.maxburst = 1; 1789 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1790 if (!epnum) 1791 dwc->gadget.ep0 = &dep->endpoint; 1792 } else { 1793 int ret; 1794 1795 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1796 dep->endpoint.max_streams = 15; 1797 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1798 list_add_tail(&dep->endpoint.ep_list, 1799 &dwc->gadget.ep_list); 1800 1801 ret = dwc3_alloc_trb_pool(dep); 1802 if (ret) 1803 return ret; 1804 } 1805 1806 if (epnum == 0 || epnum == 1) { 1807 dep->endpoint.caps.type_control = true; 1808 } else { 1809 dep->endpoint.caps.type_iso = true; 1810 dep->endpoint.caps.type_bulk = true; 1811 dep->endpoint.caps.type_int = true; 1812 } 1813 1814 dep->endpoint.caps.dir_in = !!direction; 1815 dep->endpoint.caps.dir_out = !direction; 1816 1817 INIT_LIST_HEAD(&dep->request_list); 1818 INIT_LIST_HEAD(&dep->req_queued); 1819 } 1820 1821 return 0; 1822 } 1823 1824 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1825 { 1826 int ret; 1827 1828 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1829 1830 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1831 if (ret < 0) { 1832 dwc3_trace(trace_dwc3_gadget, 1833 "failed to allocate OUT endpoints"); 1834 return ret; 1835 } 1836 1837 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1838 if (ret < 0) { 1839 dwc3_trace(trace_dwc3_gadget, 1840 "failed to allocate IN endpoints"); 1841 return ret; 1842 } 1843 1844 return 0; 1845 } 1846 1847 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1848 { 1849 struct dwc3_ep *dep; 1850 u8 epnum; 1851 1852 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1853 dep = dwc->eps[epnum]; 1854 if (!dep) 1855 continue; 1856 /* 1857 * Physical endpoints 0 and 1 are special; they form the 1858 * bi-directional USB endpoint 0. 1859 * 1860 * For those two physical endpoints, we don't allocate a TRB 1861 * pool nor do we add them the endpoints list. Due to that, we 1862 * shouldn't do these two operations otherwise we would end up 1863 * with all sorts of bugs when removing dwc3.ko. 1864 */ 1865 if (epnum != 0 && epnum != 1) { 1866 dwc3_free_trb_pool(dep); 1867 list_del(&dep->endpoint.ep_list); 1868 } 1869 1870 kfree(dep); 1871 } 1872 } 1873 1874 /* -------------------------------------------------------------------------- */ 1875 1876 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1877 struct dwc3_request *req, struct dwc3_trb *trb, 1878 const struct dwc3_event_depevt *event, int status) 1879 { 1880 unsigned int count; 1881 unsigned int s_pkt = 0; 1882 unsigned int trb_status; 1883 1884 trace_dwc3_complete_trb(dep, trb); 1885 1886 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1887 /* 1888 * We continue despite the error. There is not much we 1889 * can do. If we don't clean it up we loop forever. If 1890 * we skip the TRB then it gets overwritten after a 1891 * while since we use them in a ring buffer. A BUG() 1892 * would help. 
Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dwc3_trace(trace_dwc3_gadget,
						"%s: incomplete IN transfer\n",
						dep->name);
				/*
				 * If a missed isoc occurred and there is
				 * no request queued, then issue END
				 * TRANSFER, so that the core generates the
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests,
				 * then wait; do not issue either END
				 * or UPDATE TRANSFER, just attach the next
				 * request to request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		slot;
	unsigned int		i;
	int			ret;

	do {
		req = next_request(&dep->req_queued);
		if (WARN_ON_ONCE(!req))
			return 1;

		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in the request list then do
			 * not issue END TRANSFER now. Just set the PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added to the request list.
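			 * That pending entry is picked up later by
			 * __dwc3_gadget_ep_queue(): once the gadget driver
			 * queues another request, the END TRANSFER is issued
			 * from there and a fresh XferNotReady restarts the
			 * isoc stream.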
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb *trb;
	unsigned int slot;
	unsigned int i;
	int ret;

	do {
		req = next_request(&dep->req_queued);
		if (WARN_ON_ONCE(!req))
			return 1;

		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number, true);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	unsigned status = 0;
	int clean_busy;
	u32 is_xfer_complete;

	is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy && (is_xfer_complete ||
				usb_endpoint_xfer_isoc(dep->endpoint.desc)))
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32 reg;
		int i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}

	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		int ret;

		ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
		if (!ret || ret == -EBUSY)
			return;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_trace(trace_dwc3_gadget,
					"%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_endpoint_transfer_complete(dwc, dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int active;
			int ret;

			active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;

			dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
					dep->name, active ? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
			if (!ret || ret == -EBUSY)
				return;

			dwc3_trace(trace_dwc3_gadget,
					"%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dwc3_trace(trace_dwc3_gadget,
					"Stream %d found and started",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dwc3_trace(trace_dwc3_gadget,
					"unable to find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	if (!dep->resource_index)
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here, but give HW some
	 * extra time to synchronize with the interconnect. We're using
	 * an arbitrary 100us delay for that.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is:
	 *
	 * - Issue EndTransfer WITH CMDIOC bit set
	 * - Wait 100us
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
	dep->flags &= ~DWC3_EP_BUSY;
	udelay(100);
}
static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS, though it is not clear why we
	 * would want to do this. Maybe it becomes part of the power
	 * saving plan.
	 */

	if ((speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DCFG_SUPERSPEED) &&
	    (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, the core responds with an ACK when the
		 * BESL value in the LPM token is less than or equal to the
		 * LPM NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
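
/*
 * Not part of the driver: the switch above programs the USB-defined ep0
 * wMaxPacketSize for each connection speed (512 for SuperSpeed and above,
 * 64 for high/full speed, 8 for low speed). A hypothetical lookup helper
 * with the same mapping, for illustration only:
 */
#if 0	/* example only, never compiled */
static u16 dwc3_ep0_mps_for_speed(u8 speed)
{
	switch (speed) {
	case DWC3_DCFG_SUPERSPEED_PLUS:
	case DWC3_DCFG_SUPERSPEED:
		return 512;
	case DWC3_DCFG_HIGHSPEED:
	case DWC3_DCFG_FULLSPEED1:
	case DWC3_DCFG_FULLSPEED2:
		return 64;
	case DWC3_DCFG_LOWSPEED:
		return 8;
	default:
		return 512;	/* fall back to the SuperSpeed value */
	}
}
#endif
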
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	dwc->gadget_driver->resume(&dwc->gadget);
}

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dwc3_trace(trace_dwc3_gadget,
					"ignoring transition U3 -> Resume");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
	 * on the link partner, the USB session might go through multiple
	 * entry/exit cycles of low power states before a transfer takes
	 * place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}
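
/*
 * Not part of the driver: the two halves of the <1.83a U1/U2 workaround pair
 * up as a save/clear here and a restore in dwc3_endpoint_transfer_complete()
 * once every enabled endpoint's queue is empty. A condensed, hypothetical
 * illustration of that pairing (helper names are made up):
 */
#if 0	/* example only, never compiled */
static void dwc3_u1u2_quirk_enter_u0(struct dwc3 *dwc)		/* 1st half */
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	u32 bits = DWC3_DCTL_INITU1ENA | DWC3_DCTL_ACCEPTU1ENA |
		   DWC3_DCTL_INITU2ENA | DWC3_DCTL_ACCEPTU2ENA;

	if (!dwc->u1u2)
		dwc->u1u2 = reg & bits;		/* remember what was enabled */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg & ~bits);
}

static void dwc3_u1u2_quirk_all_idle(struct dwc3 *dwc)		/* 2nd half */
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_DCTL);

	dwc3_writel(dwc->regs, DWC3_DCTL, reg | dwc->u1u2);
	dwc->u1u2 = 0;				/* re-armed for the next entry */
}
#endif
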
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dwc3_trace(trace_dwc3_gadget, "Command Complete");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dwc3_trace(trace_dwc3_gadget, "Overflow");
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		dwc3_endpoint_interrupt(dwc, &event->depevt);
		return;
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}
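
/*
 * Not part of the driver: every event is a single 32-bit word; bit 0 tells
 * endpoint-specific events (0) apart from device-specific ones (1), which is
 * what the is_devspec test above relies on. A hypothetical decoder sketch:
 */
#if 0	/* example only, never compiled */
static void dwc3_decode_event_sketch(struct dwc3 *dwc, u32 raw)
{
	union dwc3_event event;

	event.raw = raw;

	if (!event.type.is_devspec)
		dwc3_endpoint_interrupt(dwc, &event.depevt);	/* DEPEVT */
	else if (event.type.type == DWC3_EVENT_TYPE_DEV)
		dwc3_gadget_interrupt(dwc, &event.devt);	/* DEVT */
	/* anything else (Carkit, I2C, ...) is unexpected here */
}
#endif
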
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	evt = dwc->ev_buffs[buf];
	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes: a regular entry followed by
		 * 8 bytes of data. At the moment it is not clear how
		 * things are organized if such an entry lands next to
		 * the buffer boundary, so worry about that once we try
		 * to handle it.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return ret;
}

static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++)
		ret |= dwc3_process_event_buf(dwc, i);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;
	u32 reg;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_check_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	return ret;
}
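
/*
 * Not part of the driver: dwc3_interrupt() is the hard-IRQ half (snapshot
 * GEVNTCOUNT, mark the buffer pending, mask its interrupt) and
 * dwc3_thread_interrupt() is the threaded half (drain events, acknowledge
 * them 4 bytes at a time, then unmask). They are meant to be wired up as a
 * threaded interrupt pair, roughly like the sketch below (flags and name
 * are illustrative, not necessarily what the driver uses):
 */
#if 0	/* example only, never compiled */
static int dwc3_request_irq_sketch(struct dwc3 *dwc, int irq)
{
	return request_threaded_irq(irq, dwc3_interrupt,
			dwc3_thread_interrupt, IRQF_SHARED,
			"dwc3", dwc);
}
#endif
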
/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success, otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
			GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!dwc->zlp_buf) {
		ret = -ENOMEM;
		goto err4;
	}

	dwc->gadget.ops = &dwc3_gadget_ops;
	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported = true;
	dwc->gadget.name = "dwc3-gadget";
	dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;

	/*
	 * FIXME We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which tells us we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing
	 * this to happen so we avoid sending the SuperSpeed Capability
	 * descriptor together with our BOS descriptor as that could confuse
	 * the host into thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (dwc->revision < DWC3_REVISION_220A)
		dwc3_trace(trace_dwc3_gadget,
				"Changing max_speed on rev %08x\n",
				dwc->revision);

	dwc->gadget.max_speed = dwc->maximum_speed;

	/*
	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
	 * on ep out.
	 */
	dwc->gadget.quirk_ep_out_aligned_size = true;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
2874 */ 2875 2876 ret = dwc3_gadget_init_endpoints(dwc); 2877 if (ret) 2878 goto err5; 2879 2880 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2881 if (ret) { 2882 dev_err(dwc->dev, "failed to register udc\n"); 2883 goto err5; 2884 } 2885 2886 return 0; 2887 2888 err5: 2889 kfree(dwc->zlp_buf); 2890 2891 err4: 2892 dwc3_gadget_free_endpoints(dwc); 2893 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2894 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2895 2896 err3: 2897 kfree(dwc->setup_buf); 2898 2899 err2: 2900 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2901 dwc->ep0_trb, dwc->ep0_trb_addr); 2902 2903 err1: 2904 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2905 dwc->ctrl_req, dwc->ctrl_req_addr); 2906 2907 err0: 2908 return ret; 2909 } 2910 2911 /* -------------------------------------------------------------------------- */ 2912 2913 void dwc3_gadget_exit(struct dwc3 *dwc) 2914 { 2915 usb_del_gadget_udc(&dwc->gadget); 2916 2917 dwc3_gadget_free_endpoints(dwc); 2918 2919 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2920 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2921 2922 kfree(dwc->setup_buf); 2923 kfree(dwc->zlp_buf); 2924 2925 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2926 dwc->ep0_trb, dwc->ep0_trb_addr); 2927 2928 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2929 dwc->ctrl_req, dwc->ctrl_req_addr); 2930 } 2931 2932 int dwc3_gadget_suspend(struct dwc3 *dwc) 2933 { 2934 if (dwc->pullups_connected) { 2935 dwc3_gadget_disable_irq(dwc); 2936 dwc3_gadget_run_stop(dwc, true, true); 2937 } 2938 2939 __dwc3_gadget_ep_disable(dwc->eps[0]); 2940 __dwc3_gadget_ep_disable(dwc->eps[1]); 2941 2942 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG); 2943 2944 return 0; 2945 } 2946 2947 int dwc3_gadget_resume(struct dwc3 *dwc) 2948 { 2949 struct dwc3_ep *dep; 2950 int ret; 2951 2952 /* Start with SuperSpeed Default */ 2953 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2954 2955 dep = dwc->eps[0]; 2956 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2957 false); 2958 if (ret) 2959 goto err0; 2960 2961 dep = dwc->eps[1]; 2962 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2963 false); 2964 if (ret) 2965 goto err1; 2966 2967 /* begin to receive SETUP packets */ 2968 dwc->ep0state = EP0_SETUP_PHASE; 2969 dwc3_ep0_out_start(dwc); 2970 2971 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg); 2972 2973 if (dwc->pullups_connected) { 2974 dwc3_gadget_enable_irq(dwc); 2975 dwc3_gadget_run_stop(dwc, true, false); 2976 } 2977 2978 return 0; 2979 2980 err1: 2981 __dwc3_gadget_ep_disable(dwc->eps[0]); 2982 2983 err0: 2984 return ret; 2985 } 2986