/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if wrong Test Selector
 * is passed
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	/* read-modify-write DCTL, clearing the TstCtl field first */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		/* the ch9.h TEST_* selector value maps to DCTL.TstCtl << 1 */
		reg |= mode << 1;
		break;
	default:
		/* unknown Test Selector */
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32 reg;

	/* link state is reported in the DSTS.USBLnkSt field */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		/* poll DSTS.DCNRD ("Device Controller Not Ready") */
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dwc3_trace(trace_dwc3_gadget,
			"link state change request timed out");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	if (!dwc->needs_fifo_resize)
		return 0;

	/* NOTE(review): ram1_depth is computed but not used below */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
		int		mult = 1;
		int		tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		/* GTXFIFOSIZ: FIFO start address in high 16 bits, depth in low 16 */
		fifo_size |= (last_fifo_depth << 16);

		dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed. The lock is dropped around the callback and
 * reacquired afterwards.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;
	int				i;

	if (req->queued) {
		i = 0;
		/* advance busy_slot once per mapped sg entry (at least once) */
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
				DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	/* ep0 requests using the bounce buffer were never DMA-mapped */
	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

/*
 * dwc3_send_gadget_generic_command - issue a device generic command (DGCMD)
 * and busy-wait (up to ~500us) for completion. Returns 0 on success,
 * -EINVAL if the command status is non-zero, -ETIMEDOUT on timeout.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32		timeout = 500;
	u32		reg;

	trace_dwc3_gadget_generic_cmd(cmd, param);

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Complete --> %d",
					DWC3_DGCMD_STATUS(reg));
			if (DWC3_DGCMD_STATUS(reg))
				return -EINVAL;
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Timed Out");
			return -ETIMEDOUT;
		}
		udelay(1);
	} while (1);
}

/*
 * dwc3_send_gadget_ep_cmd - issue an endpoint command (DEPCMD) for physical
 * endpoint @ep with @params, and busy-wait (up to ~500us) for completion.
 * Returns 0 on success, -EINVAL on command error status, -ETIMEDOUT on
 * timeout.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	trace_dwc3_gadget_ep_cmd(dep, cmd, params);

	/* parameters must be written before the command itself */
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Complete --> %d",
					DWC3_DEPCMD_STATUS(reg));
			if (DWC3_DEPCMD_STATUS(reg))
				return -EINVAL;
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout) {
			dwc3_trace(trace_dwc3_gadget,
					"Command Timed Out");
			return -ETIMEDOUT;
		}

		udelay(1);
	} while (1);
}

/* translate a TRB's address within the pool into its DMA (bus) address */
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

/* allocate the coherent DMA pool of DWC3_TRB_NUM TRBs for this endpoint */
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	/* already allocated; nothing to do */
	if (dep->trb_pool)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

/* free the endpoint's TRB pool and clear the bookkeeping pointers */
static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

/*
 * Issue DEPSTARTCFG on physical endpoint 0. Issued once with XferRscIdx 0
 * for the default control endpoint, and once (guarded by
 * dwc->start_config_issued) with XferRscIdx 2 for all other endpoints.
 * Physical endpoint 1 (ep0 IN) never triggers the command.
 */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

/*
 * Build and issue a SETEPCONFIG command from the USB descriptors.
 * @ignore requests the Ignore-Sequence-Number flag; @restore replays
 * dep->saved_state (hibernation resume path).
 */
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	}

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	/* bulk endpoints with a stream count advertise stream capability */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

/* assign a single transfer resource to the endpoint (SETTRANSFRESOURCE) */
static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret;

	dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
			restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		/* enable the endpoint in the Device Active Endpoint register */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		/* last TRB in the ring loops back to the first one */
		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/* append the transfer type to the endpoint name for debugging */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);

/*
 * Cancel everything on the endpoint: stop any active transfer, then give
 * back all started (req_queued) and still-pending (request_list) requests
 * with -ESHUTDOWN.
 */
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are
 * not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	/* clear this endpoint's bit in the Device Active Endpoint register */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	/* restore the plain "epNin"/"epNout" name set at probe time */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	return 0;
}

/* -------------------------------------------------------------------------- */

/* ep0 cannot be (re)enabled through the generic gadget API */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

/* ep0 cannot be disabled through the generic gadget API */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

/* usb_ep_ops ->enable: validate arguments, then enable under the lock */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double enable is a gadget-driver bug, but not fatal */
	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops ->disable: validate arguments, then disable under the lock */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double disable is a gadget-driver bug, but not fatal */
	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops ->alloc_request: allocate our request wrapper */
static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum	= dep->number;
	req->dep	= dep;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

/* usb_ep_ops ->free_request: release a request wrapper */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @dma: DMA address of the buffer (or sg entry) for this TRB
 * @length: number of bytes this TRB covers
 * @last: non-zero when this is the last TRB of the transfer (sets LST on
 *	non-isoc endpoints)
 * @chain: non-zero when more TRBs of the same request follow (sets CHN)
 * @node: index within the request; 0 selects ISOCHRONOUS_FIRST on isoc eps
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3_trb		*trb;

	dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");


	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	/* first TRB of a request: move it to req_queued and record its slot */
	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;
	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	/* hand the TRB over to the hardware last */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal then it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			/* scatter-gather request: one TRB per sg entry */
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_empty(&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}

			if (last_one)
				break;
		} else {
			/* linear request: exactly one TRB */
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}

/*
 * Prepare TRBs and issue START TRANSFER (when @start_new) or UPDATE TRANSFER
 * to the hardware. @cmd_param is the command parameter (microframe for isoc
 * starts, the resource index for updates). Returns 0 on success, -EBUSY when
 * a new start is requested on a busy endpoint, or the command's error code.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
		return -EBUSY;
	}

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		/* nothing to start; remember a request is pending */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		/* remember the transfer resource index for later UPDATEs */
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

/* kick an isoc transfer scheduled 4 intervals after @cur_uf */
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dwc3_trace(trace_dwc3_gadget,
				"ISOC ep %s run out for requests",
				dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

/* derive the current (interval-aligned) microframe from the event and start */
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

/*
 * Queue @req on @dep and, depending on endpoint state, kick the transfer
 * immediately or defer until XferNotReady. Caller holds dwc->lock.
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	if (!dep->endpoint.desc) {
		dwc3_trace(trace_dwc3_gadget,
				"trying to queue request %p to disabled %s\n",
				&req->request, dep->endpoint.name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
				&req->request, req->dep->name)) {
		dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n",
				&req->request, req->dep->name);
		return -EINVAL;
	}

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	trace_dwc3_ep_queue(req);

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * If there are no pending requests and the endpoint isn't already
	 * busy, we will just start the request straight away.
	 *
	 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
	 * little bit faster.
	 */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			!usb_endpoint_xfer_int(dep->endpoint.desc) &&
			!(dep->flags & DWC3_EP_BUSY)) {
		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		goto out;
	}

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (!ret)
			dep->flags &= ~DWC3_EP_PENDING_REQUEST;

		goto out;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		goto out;
	}

	/*
	 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
	 *    right away, otherwise host will not know we have streams to be
	 *    handled.
	 */
	if (dep->stream_capable)
		ret = __dwc3_gadget_kick_transfer(dep, 0, true);

out:
	if (ret && ret != -EBUSY)
		dwc3_trace(trace_dwc3_gadget,
				"%s: failed to kick transfers\n",
				dep->name);
	/* a busy endpoint is not an error from the caller's point of view */
	if (ret == -EBUSY)
		ret = 0;

	return ret;
}

/* completion handler for internally-queued ZLP requests: just free them */
static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
		struct usb_request *request)
{
	dwc3_gadget_ep_free_request(ep, request);
}

/*
 * Allocate and queue a zero-length request (backed by dwc->zlp_buf) so a
 * gadget-requested ZLP is handled like any other request.
 */
static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;
	struct usb_request		*request;
	struct usb_ep			*ep = &dep->endpoint;

	dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
	request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->length = 0;
	request->buf = dwc->zlp_buf;
	request->complete = __dwc3_gadget_ep_zlp_complete;

	req = to_dwc3_request(request);

	return __dwc3_gadget_ep_queue(dep, req);
}

/* usb_ep_ops ->queue: lock, queue, and append a ZLP when requested */
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);

	/*
	 * Okay, here's the thing, if gadget driver has requested for a ZLP by
	 * setting request->zero, instead of doing magic, we will just queue an
	 * extra usb_request ourselves so that it gets handled the same way as
	 * any other request.
	 */
	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0))
		ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* usb_ep_ops ->dequeue: cancel a request on either the pending or started list */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	/* look for the request on the not-yet-started list first */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* then on the list of requests already given to hardware */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	/* isoc endpoints can never be halted */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
				(!list_empty(&dep->req_queued) ||
				 !list_empty(&dep->request_list)))) {
			dwc3_trace(trace_dwc3_gadget,
					"%s: pending request, cannot halt\n",
					dep->name);
			return
-EAGAIN; 1283 } 1284 1285 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1286 DWC3_DEPCMD_SETSTALL, ¶ms); 1287 if (ret) 1288 dev_err(dwc->dev, "failed to set STALL on %s\n", 1289 dep->name); 1290 else 1291 dep->flags |= DWC3_EP_STALL; 1292 } else { 1293 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1294 DWC3_DEPCMD_CLEARSTALL, ¶ms); 1295 if (ret) 1296 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1297 dep->name); 1298 else 1299 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); 1300 } 1301 1302 return ret; 1303 } 1304 1305 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) 1306 { 1307 struct dwc3_ep *dep = to_dwc3_ep(ep); 1308 struct dwc3 *dwc = dep->dwc; 1309 1310 unsigned long flags; 1311 1312 int ret; 1313 1314 spin_lock_irqsave(&dwc->lock, flags); 1315 ret = __dwc3_gadget_ep_set_halt(dep, value, false); 1316 spin_unlock_irqrestore(&dwc->lock, flags); 1317 1318 return ret; 1319 } 1320 1321 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) 1322 { 1323 struct dwc3_ep *dep = to_dwc3_ep(ep); 1324 struct dwc3 *dwc = dep->dwc; 1325 unsigned long flags; 1326 int ret; 1327 1328 spin_lock_irqsave(&dwc->lock, flags); 1329 dep->flags |= DWC3_EP_WEDGE; 1330 1331 if (dep->number == 0 || dep->number == 1) 1332 ret = __dwc3_gadget_ep0_set_halt(ep, 1); 1333 else 1334 ret = __dwc3_gadget_ep_set_halt(dep, 1, false); 1335 spin_unlock_irqrestore(&dwc->lock, flags); 1336 1337 return ret; 1338 } 1339 1340 /* -------------------------------------------------------------------------- */ 1341 1342 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 1343 .bLength = USB_DT_ENDPOINT_SIZE, 1344 .bDescriptorType = USB_DT_ENDPOINT, 1345 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1346 }; 1347 1348 static const struct usb_ep_ops dwc3_gadget_ep0_ops = { 1349 .enable = dwc3_gadget_ep0_enable, 1350 .disable = dwc3_gadget_ep0_disable, 1351 .alloc_request = dwc3_gadget_ep_alloc_request, 1352 .free_request = dwc3_gadget_ep_free_request, 1353 .queue = 
dwc3_gadget_ep0_queue, 1354 .dequeue = dwc3_gadget_ep_dequeue, 1355 .set_halt = dwc3_gadget_ep0_set_halt, 1356 .set_wedge = dwc3_gadget_ep_set_wedge, 1357 }; 1358 1359 static const struct usb_ep_ops dwc3_gadget_ep_ops = { 1360 .enable = dwc3_gadget_ep_enable, 1361 .disable = dwc3_gadget_ep_disable, 1362 .alloc_request = dwc3_gadget_ep_alloc_request, 1363 .free_request = dwc3_gadget_ep_free_request, 1364 .queue = dwc3_gadget_ep_queue, 1365 .dequeue = dwc3_gadget_ep_dequeue, 1366 .set_halt = dwc3_gadget_ep_set_halt, 1367 .set_wedge = dwc3_gadget_ep_set_wedge, 1368 }; 1369 1370 /* -------------------------------------------------------------------------- */ 1371 1372 static int dwc3_gadget_get_frame(struct usb_gadget *g) 1373 { 1374 struct dwc3 *dwc = gadget_to_dwc(g); 1375 u32 reg; 1376 1377 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1378 return DWC3_DSTS_SOFFN(reg); 1379 } 1380 1381 static int dwc3_gadget_wakeup(struct usb_gadget *g) 1382 { 1383 struct dwc3 *dwc = gadget_to_dwc(g); 1384 1385 unsigned long timeout; 1386 unsigned long flags; 1387 1388 u32 reg; 1389 1390 int ret = 0; 1391 1392 u8 link_state; 1393 u8 speed; 1394 1395 spin_lock_irqsave(&dwc->lock, flags); 1396 1397 /* 1398 * According to the Databook Remote wakeup request should 1399 * be issued only when the device is in early suspend state. 1400 * 1401 * We can check that via USB Link State bits in DSTS register. 
1402 */ 1403 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1404 1405 speed = reg & DWC3_DSTS_CONNECTSPD; 1406 if (speed == DWC3_DSTS_SUPERSPEED) { 1407 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); 1408 ret = -EINVAL; 1409 goto out; 1410 } 1411 1412 link_state = DWC3_DSTS_USBLNKST(reg); 1413 1414 switch (link_state) { 1415 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1416 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1417 break; 1418 default: 1419 dwc3_trace(trace_dwc3_gadget, 1420 "can't wakeup from '%s'\n", 1421 dwc3_gadget_link_string(link_state)); 1422 ret = -EINVAL; 1423 goto out; 1424 } 1425 1426 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); 1427 if (ret < 0) { 1428 dev_err(dwc->dev, "failed to put link in Recovery\n"); 1429 goto out; 1430 } 1431 1432 /* Recent versions do this automatically */ 1433 if (dwc->revision < DWC3_REVISION_194A) { 1434 /* write zeroes to Link Change Request */ 1435 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1436 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; 1437 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1438 } 1439 1440 /* poll until Link State changes to ON */ 1441 timeout = jiffies + msecs_to_jiffies(100); 1442 1443 while (!time_after(jiffies, timeout)) { 1444 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1445 1446 /* in HS, means ON */ 1447 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) 1448 break; 1449 } 1450 1451 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { 1452 dev_err(dwc->dev, "failed to send remote wakeup\n"); 1453 ret = -EINVAL; 1454 } 1455 1456 out: 1457 spin_unlock_irqrestore(&dwc->lock, flags); 1458 1459 return ret; 1460 } 1461 1462 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, 1463 int is_selfpowered) 1464 { 1465 struct dwc3 *dwc = gadget_to_dwc(g); 1466 unsigned long flags; 1467 1468 spin_lock_irqsave(&dwc->lock, flags); 1469 g->is_selfpowered = !!is_selfpowered; 1470 spin_unlock_irqrestore(&dwc->lock, flags); 1471 1472 return 0; 1473 } 1474 1475 static int 
dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) 1476 { 1477 u32 reg; 1478 u32 timeout = 500; 1479 1480 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 1481 if (is_on) { 1482 if (dwc->revision <= DWC3_REVISION_187A) { 1483 reg &= ~DWC3_DCTL_TRGTULST_MASK; 1484 reg |= DWC3_DCTL_TRGTULST_RX_DET; 1485 } 1486 1487 if (dwc->revision >= DWC3_REVISION_194A) 1488 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1489 reg |= DWC3_DCTL_RUN_STOP; 1490 1491 if (dwc->has_hibernation) 1492 reg |= DWC3_DCTL_KEEP_CONNECT; 1493 1494 dwc->pullups_connected = true; 1495 } else { 1496 reg &= ~DWC3_DCTL_RUN_STOP; 1497 1498 if (dwc->has_hibernation && !suspend) 1499 reg &= ~DWC3_DCTL_KEEP_CONNECT; 1500 1501 dwc->pullups_connected = false; 1502 } 1503 1504 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 1505 1506 do { 1507 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1508 if (is_on) { 1509 if (!(reg & DWC3_DSTS_DEVCTRLHLT)) 1510 break; 1511 } else { 1512 if (reg & DWC3_DSTS_DEVCTRLHLT) 1513 break; 1514 } 1515 timeout--; 1516 if (!timeout) 1517 return -ETIMEDOUT; 1518 udelay(1); 1519 } while (1); 1520 1521 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", 1522 dwc->gadget_driver 1523 ? dwc->gadget_driver->function : "no-function", 1524 is_on ? 
"connect" : "disconnect"); 1525 1526 return 0; 1527 } 1528 1529 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) 1530 { 1531 struct dwc3 *dwc = gadget_to_dwc(g); 1532 unsigned long flags; 1533 int ret; 1534 1535 is_on = !!is_on; 1536 1537 spin_lock_irqsave(&dwc->lock, flags); 1538 ret = dwc3_gadget_run_stop(dwc, is_on, false); 1539 spin_unlock_irqrestore(&dwc->lock, flags); 1540 1541 return ret; 1542 } 1543 1544 static void dwc3_gadget_enable_irq(struct dwc3 *dwc) 1545 { 1546 u32 reg; 1547 1548 /* Enable all but Start and End of Frame IRQs */ 1549 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | 1550 DWC3_DEVTEN_EVNTOVERFLOWEN | 1551 DWC3_DEVTEN_CMDCMPLTEN | 1552 DWC3_DEVTEN_ERRTICERREN | 1553 DWC3_DEVTEN_WKUPEVTEN | 1554 DWC3_DEVTEN_ULSTCNGEN | 1555 DWC3_DEVTEN_CONNECTDONEEN | 1556 DWC3_DEVTEN_USBRSTEN | 1557 DWC3_DEVTEN_DISCONNEVTEN); 1558 1559 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); 1560 } 1561 1562 static void dwc3_gadget_disable_irq(struct dwc3 *dwc) 1563 { 1564 /* mask all interrupts */ 1565 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); 1566 } 1567 1568 static irqreturn_t dwc3_interrupt(int irq, void *_dwc); 1569 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc); 1570 1571 static int dwc3_gadget_start(struct usb_gadget *g, 1572 struct usb_gadget_driver *driver) 1573 { 1574 struct dwc3 *dwc = gadget_to_dwc(g); 1575 struct dwc3_ep *dep; 1576 unsigned long flags; 1577 int ret = 0; 1578 int irq; 1579 u32 reg; 1580 1581 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1582 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, 1583 IRQF_SHARED, "dwc3", dwc); 1584 if (ret) { 1585 dev_err(dwc->dev, "failed to request irq #%d --> %d\n", 1586 irq, ret); 1587 goto err0; 1588 } 1589 1590 spin_lock_irqsave(&dwc->lock, flags); 1591 1592 if (dwc->gadget_driver) { 1593 dev_err(dwc->dev, "%s is already bound to %s\n", 1594 dwc->gadget.name, 1595 dwc->gadget_driver->driver.name); 1596 ret = -EBUSY; 1597 goto err1; 1598 } 1599 1600 
dwc->gadget_driver = driver; 1601 1602 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 1603 reg &= ~(DWC3_DCFG_SPEED_MASK); 1604 1605 /** 1606 * WORKAROUND: DWC3 revision < 2.20a have an issue 1607 * which would cause metastability state on Run/Stop 1608 * bit if we try to force the IP to USB2-only mode. 1609 * 1610 * Because of that, we cannot configure the IP to any 1611 * speed other than the SuperSpeed 1612 * 1613 * Refers to: 1614 * 1615 * STAR#9000525659: Clock Domain Crossing on DCTL in 1616 * USB 2.0 Mode 1617 */ 1618 if (dwc->revision < DWC3_REVISION_220A) { 1619 reg |= DWC3_DCFG_SUPERSPEED; 1620 } else { 1621 switch (dwc->maximum_speed) { 1622 case USB_SPEED_LOW: 1623 reg |= DWC3_DSTS_LOWSPEED; 1624 break; 1625 case USB_SPEED_FULL: 1626 reg |= DWC3_DSTS_FULLSPEED1; 1627 break; 1628 case USB_SPEED_HIGH: 1629 reg |= DWC3_DSTS_HIGHSPEED; 1630 break; 1631 case USB_SPEED_SUPER: /* FALLTHROUGH */ 1632 case USB_SPEED_UNKNOWN: /* FALTHROUGH */ 1633 default: 1634 reg |= DWC3_DSTS_SUPERSPEED; 1635 } 1636 } 1637 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1638 1639 dwc->start_config_issued = false; 1640 1641 /* Start with SuperSpeed Default */ 1642 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1643 1644 dep = dwc->eps[0]; 1645 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1646 false); 1647 if (ret) { 1648 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1649 goto err2; 1650 } 1651 1652 dep = dwc->eps[1]; 1653 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 1654 false); 1655 if (ret) { 1656 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 1657 goto err3; 1658 } 1659 1660 /* begin to receive SETUP packets */ 1661 dwc->ep0state = EP0_SETUP_PHASE; 1662 dwc3_ep0_out_start(dwc); 1663 1664 dwc3_gadget_enable_irq(dwc); 1665 1666 spin_unlock_irqrestore(&dwc->lock, flags); 1667 1668 return 0; 1669 1670 err3: 1671 __dwc3_gadget_ep_disable(dwc->eps[0]); 1672 1673 err2: 1674 dwc->gadget_driver = NULL; 1675 1676 err1: 1677 
spin_unlock_irqrestore(&dwc->lock, flags); 1678 1679 free_irq(irq, dwc); 1680 1681 err0: 1682 return ret; 1683 } 1684 1685 static int dwc3_gadget_stop(struct usb_gadget *g) 1686 { 1687 struct dwc3 *dwc = gadget_to_dwc(g); 1688 unsigned long flags; 1689 int irq; 1690 1691 spin_lock_irqsave(&dwc->lock, flags); 1692 1693 dwc3_gadget_disable_irq(dwc); 1694 __dwc3_gadget_ep_disable(dwc->eps[0]); 1695 __dwc3_gadget_ep_disable(dwc->eps[1]); 1696 1697 dwc->gadget_driver = NULL; 1698 1699 spin_unlock_irqrestore(&dwc->lock, flags); 1700 1701 irq = platform_get_irq(to_platform_device(dwc->dev), 0); 1702 free_irq(irq, dwc); 1703 1704 return 0; 1705 } 1706 1707 static const struct usb_gadget_ops dwc3_gadget_ops = { 1708 .get_frame = dwc3_gadget_get_frame, 1709 .wakeup = dwc3_gadget_wakeup, 1710 .set_selfpowered = dwc3_gadget_set_selfpowered, 1711 .pullup = dwc3_gadget_pullup, 1712 .udc_start = dwc3_gadget_start, 1713 .udc_stop = dwc3_gadget_stop, 1714 }; 1715 1716 /* -------------------------------------------------------------------------- */ 1717 1718 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, 1719 u8 num, u32 direction) 1720 { 1721 struct dwc3_ep *dep; 1722 u8 i; 1723 1724 for (i = 0; i < num; i++) { 1725 u8 epnum = (i << 1) | (!!direction); 1726 1727 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1728 if (!dep) 1729 return -ENOMEM; 1730 1731 dep->dwc = dwc; 1732 dep->number = epnum; 1733 dep->direction = !!direction; 1734 dwc->eps[epnum] = dep; 1735 1736 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, 1737 (epnum & 1) ? 
"in" : "out"); 1738 1739 dep->endpoint.name = dep->name; 1740 1741 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); 1742 1743 if (epnum == 0 || epnum == 1) { 1744 usb_ep_set_maxpacket_limit(&dep->endpoint, 512); 1745 dep->endpoint.maxburst = 1; 1746 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1747 if (!epnum) 1748 dwc->gadget.ep0 = &dep->endpoint; 1749 } else { 1750 int ret; 1751 1752 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 1753 dep->endpoint.max_streams = 15; 1754 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1755 list_add_tail(&dep->endpoint.ep_list, 1756 &dwc->gadget.ep_list); 1757 1758 ret = dwc3_alloc_trb_pool(dep); 1759 if (ret) 1760 return ret; 1761 } 1762 1763 if (epnum == 0 || epnum == 1) { 1764 dep->endpoint.caps.type_control = true; 1765 } else { 1766 dep->endpoint.caps.type_iso = true; 1767 dep->endpoint.caps.type_bulk = true; 1768 dep->endpoint.caps.type_int = true; 1769 } 1770 1771 dep->endpoint.caps.dir_in = !!direction; 1772 dep->endpoint.caps.dir_out = !direction; 1773 1774 INIT_LIST_HEAD(&dep->request_list); 1775 INIT_LIST_HEAD(&dep->req_queued); 1776 } 1777 1778 return 0; 1779 } 1780 1781 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) 1782 { 1783 int ret; 1784 1785 INIT_LIST_HEAD(&dwc->gadget.ep_list); 1786 1787 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); 1788 if (ret < 0) { 1789 dwc3_trace(trace_dwc3_gadget, 1790 "failed to allocate OUT endpoints"); 1791 return ret; 1792 } 1793 1794 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); 1795 if (ret < 0) { 1796 dwc3_trace(trace_dwc3_gadget, 1797 "failed to allocate IN endpoints"); 1798 return ret; 1799 } 1800 1801 return 0; 1802 } 1803 1804 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) 1805 { 1806 struct dwc3_ep *dep; 1807 u8 epnum; 1808 1809 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 1810 dep = dwc->eps[epnum]; 1811 if (!dep) 1812 continue; 1813 /* 1814 * Physical endpoints 0 and 1 are special; they form the 1815 * 
bi-directional USB endpoint 0. 1816 * 1817 * For those two physical endpoints, we don't allocate a TRB 1818 * pool nor do we add them the endpoints list. Due to that, we 1819 * shouldn't do these two operations otherwise we would end up 1820 * with all sorts of bugs when removing dwc3.ko. 1821 */ 1822 if (epnum != 0 && epnum != 1) { 1823 dwc3_free_trb_pool(dep); 1824 list_del(&dep->endpoint.ep_list); 1825 } 1826 1827 kfree(dep); 1828 } 1829 } 1830 1831 /* -------------------------------------------------------------------------- */ 1832 1833 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1834 struct dwc3_request *req, struct dwc3_trb *trb, 1835 const struct dwc3_event_depevt *event, int status) 1836 { 1837 unsigned int count; 1838 unsigned int s_pkt = 0; 1839 unsigned int trb_status; 1840 1841 trace_dwc3_complete_trb(dep, trb); 1842 1843 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1844 /* 1845 * We continue despite the error. There is not much we 1846 * can do. If we don't clean it up we loop forever. If 1847 * we skip the TRB then it gets overwritten after a 1848 * while since we use them in a ring buffer. A BUG() 1849 * would help. Lets hope that if this occurs, someone 1850 * fixes the root cause instead of looking away :) 1851 */ 1852 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", 1853 dep->name, trb); 1854 count = trb->size & DWC3_TRB_SIZE_MASK; 1855 1856 if (dep->direction) { 1857 if (count) { 1858 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 1859 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { 1860 dwc3_trace(trace_dwc3_gadget, 1861 "%s: incomplete IN transfer\n", 1862 dep->name); 1863 /* 1864 * If missed isoc occurred and there is 1865 * no request queued then issue END 1866 * TRANSFER, so that core generates 1867 * next xfernotready and we will issue 1868 * a fresh START TRANSFER. 
1869 * If there are still queued request 1870 * then wait, do not issue either END 1871 * or UPDATE TRANSFER, just attach next 1872 * request in request_list during 1873 * giveback.If any future queued request 1874 * is successfully transferred then we 1875 * will issue UPDATE TRANSFER for all 1876 * request in the request_list. 1877 */ 1878 dep->flags |= DWC3_EP_MISSED_ISOC; 1879 } else { 1880 dev_err(dwc->dev, "incomplete IN transfer %s\n", 1881 dep->name); 1882 status = -ECONNRESET; 1883 } 1884 } else { 1885 dep->flags &= ~DWC3_EP_MISSED_ISOC; 1886 } 1887 } else { 1888 if (count && (event->status & DEPEVT_STATUS_SHORT)) 1889 s_pkt = 1; 1890 } 1891 1892 /* 1893 * We assume here we will always receive the entire data block 1894 * which we should receive. Meaning, if we program RX to 1895 * receive 4K but we receive only 2K, we assume that's all we 1896 * should receive and we simply bounce the request back to the 1897 * gadget driver for further processing. 1898 */ 1899 req->request.actual += req->request.length - count; 1900 if (s_pkt) 1901 return 1; 1902 if ((event->status & DEPEVT_STATUS_LST) && 1903 (trb->ctrl & (DWC3_TRB_CTRL_LST | 1904 DWC3_TRB_CTRL_HWO))) 1905 return 1; 1906 if ((event->status & DEPEVT_STATUS_IOC) && 1907 (trb->ctrl & DWC3_TRB_CTRL_IOC)) 1908 return 1; 1909 return 0; 1910 } 1911 1912 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, 1913 const struct dwc3_event_depevt *event, int status) 1914 { 1915 struct dwc3_request *req; 1916 struct dwc3_trb *trb; 1917 unsigned int slot; 1918 unsigned int i; 1919 int ret; 1920 1921 do { 1922 req = next_request(&dep->req_queued); 1923 if (WARN_ON_ONCE(!req)) 1924 return 1; 1925 1926 i = 0; 1927 do { 1928 slot = req->start_slot + i; 1929 if ((slot == DWC3_TRB_NUM - 1) && 1930 usb_endpoint_xfer_isoc(dep->endpoint.desc)) 1931 slot++; 1932 slot %= DWC3_TRB_NUM; 1933 trb = &dep->trb_pool[slot]; 1934 1935 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 1936 event, status); 1937 if 
(ret) 1938 break; 1939 } while (++i < req->request.num_mapped_sgs); 1940 1941 dwc3_gadget_giveback(dep, req, status); 1942 1943 if (ret) 1944 break; 1945 } while (1); 1946 1947 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1948 list_empty(&dep->req_queued)) { 1949 if (list_empty(&dep->request_list)) { 1950 /* 1951 * If there is no entry in request list then do 1952 * not issue END TRANSFER now. Just set PENDING 1953 * flag, so that END TRANSFER is issued when an 1954 * entry is added into request list. 1955 */ 1956 dep->flags = DWC3_EP_PENDING_REQUEST; 1957 } else { 1958 dwc3_stop_active_transfer(dwc, dep->number, true); 1959 dep->flags = DWC3_EP_ENABLED; 1960 } 1961 return 1; 1962 } 1963 1964 return 1; 1965 } 1966 1967 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, 1968 struct dwc3_ep *dep, const struct dwc3_event_depevt *event) 1969 { 1970 unsigned status = 0; 1971 int clean_busy; 1972 u32 is_xfer_complete; 1973 1974 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE); 1975 1976 if (event->status & DEPEVT_STATUS_BUSERR) 1977 status = -ECONNRESET; 1978 1979 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); 1980 if (clean_busy && (is_xfer_complete || 1981 usb_endpoint_xfer_isoc(dep->endpoint.desc))) 1982 dep->flags &= ~DWC3_EP_BUSY; 1983 1984 /* 1985 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. 1986 * See dwc3_gadget_linksts_change_interrupt() for 1st half. 
1987 */ 1988 if (dwc->revision < DWC3_REVISION_183A) { 1989 u32 reg; 1990 int i; 1991 1992 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { 1993 dep = dwc->eps[i]; 1994 1995 if (!(dep->flags & DWC3_EP_ENABLED)) 1996 continue; 1997 1998 if (!list_empty(&dep->req_queued)) 1999 return; 2000 } 2001 2002 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2003 reg |= dwc->u1u2; 2004 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2005 2006 dwc->u1u2 = 0; 2007 } 2008 2009 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2010 int ret; 2011 2012 ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete); 2013 if (!ret || ret == -EBUSY) 2014 return; 2015 } 2016 } 2017 2018 static void dwc3_endpoint_interrupt(struct dwc3 *dwc, 2019 const struct dwc3_event_depevt *event) 2020 { 2021 struct dwc3_ep *dep; 2022 u8 epnum = event->endpoint_number; 2023 2024 dep = dwc->eps[epnum]; 2025 2026 if (!(dep->flags & DWC3_EP_ENABLED)) 2027 return; 2028 2029 if (epnum == 0 || epnum == 1) { 2030 dwc3_ep0_interrupt(dwc, event); 2031 return; 2032 } 2033 2034 switch (event->endpoint_event) { 2035 case DWC3_DEPEVT_XFERCOMPLETE: 2036 dep->resource_index = 0; 2037 2038 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2039 dwc3_trace(trace_dwc3_gadget, 2040 "%s is an Isochronous endpoint\n", 2041 dep->name); 2042 return; 2043 } 2044 2045 dwc3_endpoint_transfer_complete(dwc, dep, event); 2046 break; 2047 case DWC3_DEPEVT_XFERINPROGRESS: 2048 dwc3_endpoint_transfer_complete(dwc, dep, event); 2049 break; 2050 case DWC3_DEPEVT_XFERNOTREADY: 2051 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 2052 dwc3_gadget_start_isoc(dwc, dep, event); 2053 } else { 2054 int active; 2055 int ret; 2056 2057 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; 2058 2059 dwc3_trace(trace_dwc3_gadget, "%s: reason %s", 2060 dep->name, active ? 
					"Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
			if (!ret || ret == -EBUSY)
				return;

			dwc3_trace(trace_dwc3_gadget,
					"%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dwc3_trace(trace_dwc3_gadget,
					"Stream %d found and started",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dwc3_trace(trace_dwc3_gadget,
					"unable to find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
		break;
	}
}

/*
 * Notify the gadget driver of a disconnect, if it registered a
 * ->disconnect() callback. Called with dwc->lock held; the lock is
 * released across the callback and re-acquired afterwards — presumably
 * because the callback may call back into this driver; verify against
 * callers before changing the locking.
 */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/*
 * Notify the gadget driver of a bus suspend; ->suspend() is optional,
 * so both pointers are checked. Same lock-drop pattern as
 * dwc3_disconnect_gadget().
 */
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/*
 * Notify the gadget driver of a bus resume; ->resume() is optional,
 * so both pointers are checked. Same lock-drop pattern as
 * dwc3_disconnect_gadget().
 */
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

/*
 * Propagate a USB reset to the gadget driver. Only meaningful once a
 * connection speed has been established (speed != UNKNOWN); the lock
 * is dropped across usb_gadget_udc_reset() like the other notifiers.
 */
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
2137 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); 2138 spin_lock(&dwc->lock); 2139 } 2140 } 2141 2142 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) 2143 { 2144 struct dwc3_ep *dep; 2145 struct dwc3_gadget_ep_cmd_params params; 2146 u32 cmd; 2147 int ret; 2148 2149 dep = dwc->eps[epnum]; 2150 2151 if (!dep->resource_index) 2152 return; 2153 2154 /* 2155 * NOTICE: We are violating what the Databook says about the 2156 * EndTransfer command. Ideally we would _always_ wait for the 2157 * EndTransfer Command Completion IRQ, but that's causing too 2158 * much trouble synchronizing between us and gadget driver. 2159 * 2160 * We have discussed this with the IP Provider and it was 2161 * suggested to giveback all requests here, but give HW some 2162 * extra time to synchronize with the interconnect. We're using 2163 * an arbitrary 100us delay for that. 2164 * 2165 * Note also that a similar handling was tested by Synopsys 2166 * (thanks a lot Paul) and nothing bad has come out of it. 2167 * In short, what we're doing is: 2168 * 2169 * - Issue EndTransfer WITH CMDIOC bit set 2170 * - Wait 100us 2171 */ 2172 2173 cmd = DWC3_DEPCMD_ENDTRANSFER; 2174 cmd |= force ? 
DWC3_DEPCMD_HIPRI_FORCERM : 0; 2175 cmd |= DWC3_DEPCMD_CMDIOC; 2176 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); 2177 memset(¶ms, 0, sizeof(params)); 2178 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); 2179 WARN_ON_ONCE(ret); 2180 dep->resource_index = 0; 2181 dep->flags &= ~DWC3_EP_BUSY; 2182 udelay(100); 2183 } 2184 2185 static void dwc3_stop_active_transfers(struct dwc3 *dwc) 2186 { 2187 u32 epnum; 2188 2189 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2190 struct dwc3_ep *dep; 2191 2192 dep = dwc->eps[epnum]; 2193 if (!dep) 2194 continue; 2195 2196 if (!(dep->flags & DWC3_EP_ENABLED)) 2197 continue; 2198 2199 dwc3_remove_requests(dwc, dep); 2200 } 2201 } 2202 2203 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) 2204 { 2205 u32 epnum; 2206 2207 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2208 struct dwc3_ep *dep; 2209 struct dwc3_gadget_ep_cmd_params params; 2210 int ret; 2211 2212 dep = dwc->eps[epnum]; 2213 if (!dep) 2214 continue; 2215 2216 if (!(dep->flags & DWC3_EP_STALL)) 2217 continue; 2218 2219 dep->flags &= ~DWC3_EP_STALL; 2220 2221 memset(¶ms, 0, sizeof(params)); 2222 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 2223 DWC3_DEPCMD_CLEARSTALL, ¶ms); 2224 WARN_ON_ONCE(ret); 2225 } 2226 } 2227 2228 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) 2229 { 2230 int reg; 2231 2232 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2233 reg &= ~DWC3_DCTL_INITU1ENA; 2234 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2235 2236 reg &= ~DWC3_DCTL_INITU2ENA; 2237 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2238 2239 dwc3_disconnect_gadget(dwc); 2240 dwc->start_config_issued = false; 2241 2242 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2243 dwc->setup_packet_pending = false; 2244 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); 2245 } 2246 2247 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) 2248 { 2249 u32 reg; 2250 2251 /* 2252 * WORKAROUND: DWC3 revisions <1.88a have an issue which 2253 * would cause a missing 
Disconnect Event if there's a 2254 * pending Setup Packet in the FIFO. 2255 * 2256 * There's no suggested workaround on the official Bug 2257 * report, which states that "unless the driver/application 2258 * is doing any special handling of a disconnect event, 2259 * there is no functional issue". 2260 * 2261 * Unfortunately, it turns out that we _do_ some special 2262 * handling of a disconnect event, namely complete all 2263 * pending transfers, notify gadget driver of the 2264 * disconnection, and so on. 2265 * 2266 * Our suggested workaround is to follow the Disconnect 2267 * Event steps here, instead, based on a setup_packet_pending 2268 * flag. Such flag gets set whenever we have a SETUP_PENDING 2269 * status for EP0 TRBs and gets cleared on XferComplete for the 2270 * same endpoint. 2271 * 2272 * Refers to: 2273 * 2274 * STAR#9000466709: RTL: Device : Disconnect event not 2275 * generated if setup packet pending in FIFO 2276 */ 2277 if (dwc->revision < DWC3_REVISION_188A) { 2278 if (dwc->setup_packet_pending) 2279 dwc3_gadget_disconnect_interrupt(dwc); 2280 } 2281 2282 dwc3_reset_gadget(dwc); 2283 2284 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2285 reg &= ~DWC3_DCTL_TSTCTRL_MASK; 2286 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2287 dwc->test_mode = false; 2288 2289 dwc3_stop_active_transfers(dwc); 2290 dwc3_clear_stall_all_ep(dwc); 2291 dwc->start_config_issued = false; 2292 2293 /* Reset device address to zero */ 2294 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2295 reg &= ~(DWC3_DCFG_DEVADDR_MASK); 2296 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2297 } 2298 2299 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) 2300 { 2301 u32 reg; 2302 u32 usb30_clock = DWC3_GCTL_CLK_BUS; 2303 2304 /* 2305 * We change the clock only at SS but I dunno why I would want to do 2306 * this. Maybe it becomes part of the power saving plan. 
2307 */ 2308 2309 if (speed != DWC3_DSTS_SUPERSPEED) 2310 return; 2311 2312 /* 2313 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed 2314 * each time on Connect Done. 2315 */ 2316 if (!usb30_clock) 2317 return; 2318 2319 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 2320 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); 2321 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 2322 } 2323 2324 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2325 { 2326 struct dwc3_ep *dep; 2327 int ret; 2328 u32 reg; 2329 u8 speed; 2330 2331 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2332 speed = reg & DWC3_DSTS_CONNECTSPD; 2333 dwc->speed = speed; 2334 2335 dwc3_update_ram_clk_sel(dwc, speed); 2336 2337 switch (speed) { 2338 case DWC3_DCFG_SUPERSPEED: 2339 /* 2340 * WORKAROUND: DWC3 revisions <1.90a have an issue which 2341 * would cause a missing USB3 Reset event. 2342 * 2343 * In such situations, we should force a USB3 Reset 2344 * event by calling our dwc3_gadget_reset_interrupt() 2345 * routine. 
2346 * 2347 * Refers to: 2348 * 2349 * STAR#9000483510: RTL: SS : USB3 reset event may 2350 * not be generated always when the link enters poll 2351 */ 2352 if (dwc->revision < DWC3_REVISION_190A) 2353 dwc3_gadget_reset_interrupt(dwc); 2354 2355 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2356 dwc->gadget.ep0->maxpacket = 512; 2357 dwc->gadget.speed = USB_SPEED_SUPER; 2358 break; 2359 case DWC3_DCFG_HIGHSPEED: 2360 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2361 dwc->gadget.ep0->maxpacket = 64; 2362 dwc->gadget.speed = USB_SPEED_HIGH; 2363 break; 2364 case DWC3_DCFG_FULLSPEED2: 2365 case DWC3_DCFG_FULLSPEED1: 2366 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2367 dwc->gadget.ep0->maxpacket = 64; 2368 dwc->gadget.speed = USB_SPEED_FULL; 2369 break; 2370 case DWC3_DCFG_LOWSPEED: 2371 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); 2372 dwc->gadget.ep0->maxpacket = 8; 2373 dwc->gadget.speed = USB_SPEED_LOW; 2374 break; 2375 } 2376 2377 /* Enable USB2 LPM Capability */ 2378 2379 if ((dwc->revision > DWC3_REVISION_194A) 2380 && (speed != DWC3_DCFG_SUPERSPEED)) { 2381 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2382 reg |= DWC3_DCFG_LPM_CAP; 2383 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 2384 2385 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2386 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); 2387 2388 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); 2389 2390 /* 2391 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and 2392 * DCFG.LPMCap is set, core responses with an ACK and the 2393 * BESL value in the LPM token is less than or equal to LPM 2394 * NYET threshold. 
2395 */ 2396 WARN_ONCE(dwc->revision < DWC3_REVISION_240A 2397 && dwc->has_lpm_erratum, 2398 "LPM Erratum not available on dwc3 revisisions < 2.40a\n"); 2399 2400 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) 2401 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); 2402 2403 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2404 } else { 2405 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 2406 reg &= ~DWC3_DCTL_HIRD_THRES_MASK; 2407 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2408 } 2409 2410 dep = dwc->eps[0]; 2411 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2412 false); 2413 if (ret) { 2414 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2415 return; 2416 } 2417 2418 dep = dwc->eps[1]; 2419 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, 2420 false); 2421 if (ret) { 2422 dev_err(dwc->dev, "failed to enable %s\n", dep->name); 2423 return; 2424 } 2425 2426 /* 2427 * Configure PHY via GUSB3PIPECTLn if required. 2428 * 2429 * Update GTXFIFOSIZn 2430 * 2431 * In both cases reset values should be sufficient. 2432 */ 2433 } 2434 2435 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) 2436 { 2437 /* 2438 * TODO take core out of low power mode when that's 2439 * implemented. 2440 */ 2441 2442 dwc->gadget_driver->resume(&dwc->gadget); 2443 } 2444 2445 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2446 unsigned int evtinfo) 2447 { 2448 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; 2449 unsigned int pwropt; 2450 2451 /* 2452 * WORKAROUND: DWC3 < 2.50a have an issue when configured without 2453 * Hibernation mode enabled which would show up when device detects 2454 * host-initiated U3 exit. 2455 * 2456 * In that case, device will generate a Link State Change Interrupt 2457 * from U3 to RESUME which is only necessary if Hibernation is 2458 * configured in. 2459 * 2460 * There are no functional changes due to such spurious event and we 2461 * just need to ignore it. 
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dwc3_trace(trace_dwc3_gadget,
					"ignoring transition U3 -> Resume");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				/* all four U1/U2 initiate/accept enables */
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/*
				 * Stash the currently-enabled bits (only
				 * once) so the second half of the workaround
				 * can restore them later.
				 */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* notify the gadget driver about suspend/resume transitions */
	switch (next) {
	case DWC3_LINK_STATE_U1:
		/*
		 * NOTE(review): dwc->speed is assigned the raw DSTS connect
		 * speed encoding in the Connect Done handler, while
		 * USB_SPEED_SUPER is the gadget enum value — confirm the two
		 * encodings actually match here (dwc->gadget.speed may be the
		 * intended operand).
		 */
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	/* remember where we are so the next event sees the old state above */
	dwc->link_state = next;
}

/**
 * dwc3_gadget_hibernation_interrupt - handle the Hibernation Request event
 * @dwc: pointer to our context structure
 * @evtinfo: event information bits; BIT(4) flags a SuperSpeed event
 *
 * Called with dwc->lock held. Hibernation entry itself is not yet
 * implemented (see the trailing placeholder comment).
 */
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 *
	 * Ignore the event if its SS flag disagrees with our current
	 * operating speed.
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}

/**
 * dwc3_gadget_interrupt - dispatch a device-specific (DEVT) event
 * @dwc: pointer to our context structure
 * @event: decoded device event
 *
 * Routes each device event type to its handler; purely informational
 * events are only traced. Called with dwc->lock held.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		/* hibernation events are bogus unless the core supports it */
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dwc3_trace(trace_dwc3_gadget, "Command Complete");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dwc3_trace(trace_dwc3_gadget, "Overflow");
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

/**
 * dwc3_process_event_entry - demultiplex one raw event buffer entry
 * @dwc: pointer to our context structure
 * @event: raw 32-bit event word
 *
 * Endpoint events (is_devspec == 0) go straight to the endpoint handler;
 * everything else is dispatched by its type field.
 */
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw);

	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return
		dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}

/**
 * dwc3_process_event_buf - drain one event buffer (threaded-IRQ half)
 * @dwc: pointer to our context structure
 * @buf: index of the event buffer to drain
 *
 * Processes the evt->count bytes that dwc3_check_event_buf() latched,
 * acknowledging 4 bytes to GEVNTCOUNT per consumed entry, then clears the
 * PENDING flag and unmasks the buffer's interrupt. Returns IRQ_HANDLED if
 * any events were pending, IRQ_NONE otherwise. Runs under dwc->lock
 * (taken in dwc3_thread_interrupt()).
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	evt = dwc->ev_buffs[buf];
	left = evt->count;

	/* nothing latched by the hard-IRQ half: not ours */
	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the a
		 * boundary so I worry about that once we try to handle
		 * that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* report each consumed entry back to the controller */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return ret;
}

/**
 * dwc3_thread_interrupt - threaded IRQ handler
 * @irq: interrupt number (unused)
 * @_dwc: our context structure, passed as the cookie
 *
 * Drains every event buffer under dwc->lock.
 */
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	int i;

	spin_lock_irqsave(&dwc->lock, flags);

	for (i = 0; i < dwc->num_event_buffers; i++)
		ret |= dwc3_process_event_buf(dwc, i);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/**
 * dwc3_check_event_buf - hard-IRQ half: latch pending event count
 * @dwc: pointer to our context structure
 * @buf: index of the event buffer to check
 *
 * Snapshots GEVNTCOUNT into evt->count, marks the buffer PENDING and
 * masks its interrupt so the threaded handler can drain it without the
 * line re-firing. Returns IRQ_WAKE_THREAD when work is pending, IRQ_NONE
 * otherwise.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	u32 count;
	u32 reg;

	evt = dwc->ev_buffs[buf];

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);

	return IRQ_WAKE_THREAD;
}

/**
 * dwc3_interrupt - top-level hard IRQ handler
 * @irq: interrupt number (unused)
 * @_dwc: our context structure, passed as the cookie
 *
 * Checks every event buffer; wakes the IRQ thread if any has events.
 */
static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_check_event_buf(dwc, i);
		if (status == IRQ_WAKE_THREAD)
			ret = status;
	}

	return ret;
}

/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on
success otherwise negative errno. 2746 */ 2747 int dwc3_gadget_init(struct dwc3 *dwc) 2748 { 2749 int ret; 2750 2751 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2752 &dwc->ctrl_req_addr, GFP_KERNEL); 2753 if (!dwc->ctrl_req) { 2754 dev_err(dwc->dev, "failed to allocate ctrl request\n"); 2755 ret = -ENOMEM; 2756 goto err0; 2757 } 2758 2759 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, 2760 &dwc->ep0_trb_addr, GFP_KERNEL); 2761 if (!dwc->ep0_trb) { 2762 dev_err(dwc->dev, "failed to allocate ep0 trb\n"); 2763 ret = -ENOMEM; 2764 goto err1; 2765 } 2766 2767 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL); 2768 if (!dwc->setup_buf) { 2769 ret = -ENOMEM; 2770 goto err2; 2771 } 2772 2773 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, 2774 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, 2775 GFP_KERNEL); 2776 if (!dwc->ep0_bounce) { 2777 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); 2778 ret = -ENOMEM; 2779 goto err3; 2780 } 2781 2782 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL); 2783 if (!dwc->zlp_buf) { 2784 ret = -ENOMEM; 2785 goto err4; 2786 } 2787 2788 dwc->gadget.ops = &dwc3_gadget_ops; 2789 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2790 dwc->gadget.sg_supported = true; 2791 dwc->gadget.name = "dwc3-gadget"; 2792 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; 2793 2794 /* 2795 * FIXME We might be setting max_speed to <SUPER, however versions 2796 * <2.20a of dwc3 have an issue with metastability (documented 2797 * elsewhere in this driver) which tells us we can't set max speed to 2798 * anything lower than SUPER. 2799 * 2800 * Because gadget.max_speed is only used by composite.c and function 2801 * drivers (i.e. it won't go into dwc3's registers) we are allowing this 2802 * to happen so we avoid sending SuperSpeed Capability descriptor 2803 * together with our BOS descriptor as that could confuse host into 2804 * thinking we can handle super speed. 
2805 * 2806 * Note that, in fact, we won't even support GetBOS requests when speed 2807 * is less than super speed because we don't have means, yet, to tell 2808 * composite.c that we are USB 2.0 + LPM ECN. 2809 */ 2810 if (dwc->revision < DWC3_REVISION_220A) 2811 dwc3_trace(trace_dwc3_gadget, 2812 "Changing max_speed on rev %08x\n", 2813 dwc->revision); 2814 2815 dwc->gadget.max_speed = dwc->maximum_speed; 2816 2817 /* 2818 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2819 * on ep out. 2820 */ 2821 dwc->gadget.quirk_ep_out_aligned_size = true; 2822 2823 /* 2824 * REVISIT: Here we should clear all pending IRQs to be 2825 * sure we're starting from a well known location. 2826 */ 2827 2828 ret = dwc3_gadget_init_endpoints(dwc); 2829 if (ret) 2830 goto err5; 2831 2832 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2833 if (ret) { 2834 dev_err(dwc->dev, "failed to register udc\n"); 2835 goto err5; 2836 } 2837 2838 return 0; 2839 2840 err5: 2841 kfree(dwc->zlp_buf); 2842 2843 err4: 2844 dwc3_gadget_free_endpoints(dwc); 2845 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2846 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2847 2848 err3: 2849 kfree(dwc->setup_buf); 2850 2851 err2: 2852 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2853 dwc->ep0_trb, dwc->ep0_trb_addr); 2854 2855 err1: 2856 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), 2857 dwc->ctrl_req, dwc->ctrl_req_addr); 2858 2859 err0: 2860 return ret; 2861 } 2862 2863 /* -------------------------------------------------------------------------- */ 2864 2865 void dwc3_gadget_exit(struct dwc3 *dwc) 2866 { 2867 usb_del_gadget_udc(&dwc->gadget); 2868 2869 dwc3_gadget_free_endpoints(dwc); 2870 2871 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2872 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2873 2874 kfree(dwc->setup_buf); 2875 kfree(dwc->zlp_buf); 2876 2877 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2878 dwc->ep0_trb, dwc->ep0_trb_addr); 2879 2880 dma_free_coherent(dwc->dev, 
sizeof(*dwc->ctrl_req), 2881 dwc->ctrl_req, dwc->ctrl_req_addr); 2882 } 2883 2884 int dwc3_gadget_suspend(struct dwc3 *dwc) 2885 { 2886 if (dwc->pullups_connected) { 2887 dwc3_gadget_disable_irq(dwc); 2888 dwc3_gadget_run_stop(dwc, true, true); 2889 } 2890 2891 __dwc3_gadget_ep_disable(dwc->eps[0]); 2892 __dwc3_gadget_ep_disable(dwc->eps[1]); 2893 2894 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG); 2895 2896 return 0; 2897 } 2898 2899 int dwc3_gadget_resume(struct dwc3 *dwc) 2900 { 2901 struct dwc3_ep *dep; 2902 int ret; 2903 2904 /* Start with SuperSpeed Default */ 2905 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2906 2907 dep = dwc->eps[0]; 2908 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2909 false); 2910 if (ret) 2911 goto err0; 2912 2913 dep = dwc->eps[1]; 2914 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, 2915 false); 2916 if (ret) 2917 goto err1; 2918 2919 /* begin to receive SETUP packets */ 2920 dwc->ep0state = EP0_SETUP_PHASE; 2921 dwc3_ep0_out_start(dwc); 2922 2923 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg); 2924 2925 if (dwc->pullups_connected) { 2926 dwc3_gadget_enable_irq(dwc); 2927 dwc3_gadget_run_stop(dwc, true, false); 2928 } 2929 2930 return 0; 2931 2932 err1: 2933 __dwc3_gadget_ep_disable(dwc->eps[0]); 2934 2935 err0: 2936 return ret; 2937 } 2938