// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

#include "mtu3.h"
#include "mtu3_trace.h"

void mtu3_req_complete(struct mtu3_ep *mep,
		       struct usb_request *req, int status)
__releases(mep->mtu->lock)
__acquires(mep->mtu->lock)
{
	struct mtu3_request *mreq = to_mtu3_request(req);
	struct mtu3 *mtu = mreq->mtu;

	list_del(&mreq->list);
	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_mtu3_req_complete(mreq);
	spin_unlock(&mtu->lock);

	/* ep0 makes use of PIO, so there is no need to unmap it */
	if (mep->epnum)
		usb_gadget_unmap_request(&mtu->g, req, mep->is_in);

	dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n",
		mep->name, req, req->status, req->actual, req->length);

	usb_gadget_giveback_request(&mep->ep, req);
	spin_lock(&mtu->lock);
}

static void nuke(struct mtu3_ep *mep, const int status)
{
	struct mtu3_request *mreq = NULL;

	if (list_empty(&mep->req_list))
		return;

	dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);

	/* exclude EP0 */
	if (mep->epnum)
		mtu3_qmu_flush(mep);

	while (!list_empty(&mep->req_list)) {
		mreq = list_first_entry(&mep->req_list,
					struct mtu3_request, list);
		mtu3_req_complete(mep, &mreq->request, status);
	}
}

static int mtu3_ep_enable(struct mtu3_ep *mep)
{
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	struct mtu3 *mtu = mep->mtu;
	u32 interval = 0;
	u32 mult = 0;
	u32 burst = 0;
	int max_packet;
	int ret;

	desc = mep->desc;
	comp_desc = mep->comp_desc;
	mep->type = usb_endpoint_type(desc);
	max_packet = usb_endpoint_maxp(desc);
	mep->maxp = max_packet & GENMASK(10, 0);

	switch (mtu->g.speed) {
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		if (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc)) {
			interval = desc->bInterval;
			interval = clamp_val(interval, 1, 16) - 1;
			if (usb_endpoint_xfer_isoc(desc) && comp_desc)
				mult = comp_desc->bmAttributes;
		}
		if (comp_desc)
			burst = comp_desc->bMaxBurst;

		break;
	case USB_SPEED_HIGH:
		if (usb_endpoint_xfer_isoc(desc) ||
		    usb_endpoint_xfer_int(desc)) {
			interval = desc->bInterval;
			interval = clamp_val(interval, 1, 16) - 1;
			burst = (max_packet & GENMASK(12, 11)) >> 11;
		}
		break;
	default:
		break; /* others are ignored */
	}

	dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
		__func__, mep->maxp, interval, burst, mult);

	mep->ep.maxpacket = mep->maxp;
	mep->ep.desc = desc;
	mep->ep.comp_desc = comp_desc;

	/* slot mainly affects bulk/isoc transfer, so ignore int */
	mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot;

	ret = mtu3_config_ep(mtu, mep, interval, burst, mult);
	if (ret < 0)
		return ret;

	ret = mtu3_gpd_ring_alloc(mep);
	if (ret < 0) {
		mtu3_deconfig_ep(mtu, mep);
		return ret;
	}

	mtu3_qmu_start(mep);

	return 0;
}

static int mtu3_ep_disable(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;

	mtu3_qmu_stop(mep);

	/* abort all pending requests */
	nuke(mep, -ESHUTDOWN);
	mtu3_deconfig_ep(mtu, mep);
	mtu3_gpd_ring_free(mep);

	mep->desc = NULL;
	mep->ep.desc = NULL;
	mep->comp_desc = NULL;
	mep->type = 0;
	mep->flags = 0;

	return 0;
}

static int mtu3_gadget_ep_enable(struct usb_ep *ep,
				 const struct usb_endpoint_descriptor *desc)
{
	struct mtu3_ep *mep;
	struct mtu3 *mtu;
	unsigned long flags;
	int ret = -EINVAL;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("%s invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("%s missing wMaxPacketSize\n", __func__);
		return -EINVAL;
	}
	mep = to_mtu3_ep(ep);
	mtu = mep->mtu;

	/* check ep number and direction against endpoint */
	if (usb_endpoint_num(desc) != mep->epnum)
		return -EINVAL;

	if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in)
		return -EINVAL;

	dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name);

	if (mep->flags & MTU3_EP_ENABLED) {
		dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n",
			      mep->name);
		return 0;
	}

	spin_lock_irqsave(&mtu->lock, flags);
	mep->desc = desc;
	mep->comp_desc = ep->comp_desc;

	ret = mtu3_ep_enable(mep);
	if (ret)
		goto error;

	mep->flags = MTU3_EP_ENABLED;
	mtu->active_ep++;

error:
	spin_unlock_irqrestore(&mtu->lock, flags);

	dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
	trace_mtu3_gadget_ep_enable(mep);

	return ret;
}

static int mtu3_gadget_ep_disable(struct usb_ep *ep)
{
	struct mtu3_ep *mep = to_mtu3_ep(ep);
	struct mtu3 *mtu = mep->mtu;
	unsigned long flags;

	dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
	trace_mtu3_gadget_ep_disable(mep);

	if (!(mep->flags & MTU3_EP_ENABLED)) {
		dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
		return 0;
	}

	spin_lock_irqsave(&mtu->lock, flags);
	mtu3_ep_disable(mep);
	mep->flags = 0;
	mtu->active_ep--;
	spin_unlock_irqrestore(&mtu->lock, flags);

	dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n",
		__func__, mtu->active_ep, mtu->is_active);

	return 0;
}

struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct mtu3_ep *mep = to_mtu3_ep(ep);
	struct mtu3_request *mreq;

	mreq = kzalloc(sizeof(*mreq), gfp_flags);
	if (!mreq)
		return NULL;

	mreq->request.dma = DMA_ADDR_INVALID;
	mreq->epnum = mep->epnum;
	mreq->mep = mep;
	trace_mtu3_alloc_request(mreq);

	return &mreq->request;
}

void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct mtu3_request *mreq = to_mtu3_request(req);

	trace_mtu3_free_request(mreq);
	kfree(mreq);
}

static int mtu3_gadget_queue(struct usb_ep *ep,
			     struct usb_request *req, gfp_t gfp_flags)
{
	struct mtu3_ep *mep = to_mtu3_ep(ep);
	struct mtu3_request *mreq = to_mtu3_request(req);
	struct mtu3 *mtu = mep->mtu;
	unsigned long flags;
	int ret = 0;

	if (!req->buf)
		return -ENODATA;

	if (mreq->mep != mep)
		return -EINVAL;

	dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
		__func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
		mreq, ep->maxpacket, mreq->request.length);

	if (req->length > GPD_BUF_SIZE ||
	    (mtu->gen2cp && req->length > GPD_BUF_SIZE_EL)) {
		dev_warn(mtu->dev,
			 "req length > supported MAX:%d requested:%d\n",
			 mtu->gen2cp ? GPD_BUF_SIZE_EL : GPD_BUF_SIZE,
			 req->length);
		return -EOPNOTSUPP;
	}

	/* don't queue if the ep is down */
	if (!mep->desc) {
		dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n",
			req, ep->name);
		return -ESHUTDOWN;
	}

	mreq->mtu = mtu;
	mreq->request.actual = 0;
	mreq->request.status = -EINPROGRESS;

	ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
	if (ret) {
		dev_err(mtu->dev, "dma mapping failed\n");
		return ret;
	}

	spin_lock_irqsave(&mtu->lock, flags);

	if (mtu3_prepare_transfer(mep)) {
		ret = -EAGAIN;
		goto error;
	}

	list_add_tail(&mreq->list, &mep->req_list);
	mtu3_insert_gpd(mep, mreq);
	mtu3_qmu_resume(mep);

error:
	spin_unlock_irqrestore(&mtu->lock, flags);
	trace_mtu3_gadget_queue(mreq);

	return ret;
}

static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct mtu3_ep *mep = to_mtu3_ep(ep);
	struct mtu3_request *mreq = to_mtu3_request(req);
	struct mtu3_request *r;
	struct mtu3 *mtu = mep->mtu;
	unsigned long flags;
	int ret = 0;

	if (mreq->mep != mep)
		return -EINVAL;

	dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
	trace_mtu3_gadget_dequeue(mreq);

	spin_lock_irqsave(&mtu->lock, flags);

	list_for_each_entry(r, &mep->req_list, list) {
		if (r == mreq)
			break;
	}
	if (r != mreq) {
		dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
		ret = -EINVAL;
		goto done;
	}

	mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */
	mtu3_req_complete(mep, req, -ECONNRESET);
	mtu3_qmu_start(mep);

done:
	spin_unlock_irqrestore(&mtu->lock, flags);

	return ret;
}

/*
 * Set or clear the halt bit of an EP.
 * A halted EP won't transmit/receive any data, but will still queue requests.
 */
static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct mtu3_ep *mep = to_mtu3_ep(ep);
	struct mtu3 *mtu = mep->mtu;
	struct mtu3_request *mreq;
	unsigned long flags;
	int ret = 0;

	dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name);

	spin_lock_irqsave(&mtu->lock, flags);

	if (mep->type == USB_ENDPOINT_XFER_ISOC) {
		ret = -EINVAL;
		goto done;
	}

	mreq = next_request(mep);
	if (value) {
		/*
		 * If there is no request for a TX EP, QMU will not transfer
		 * data to the TX FIFO, so there is no need to check whether
		 * the TX FIFO holds bytes here.
		 */
		if (mreq) {
			dev_dbg(mtu->dev, "req in progress, cannot halt %s\n",
				ep->name);
			ret = -EAGAIN;
			goto done;
		}
	} else {
		mep->flags &= ~MTU3_EP_WEDGE;
	}

	dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear");

	mtu3_ep_stall_set(mep, value);

done:
	spin_unlock_irqrestore(&mtu->lock, flags);
	trace_mtu3_gadget_ep_set_halt(mep);

	return ret;
}

/* Sets the halt feature with the clear requests ignored */
static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct mtu3_ep *mep = to_mtu3_ep(ep);

	mep->flags |= MTU3_EP_WEDGE;

	return usb_ep_set_halt(ep);
}

static const struct usb_ep_ops mtu3_ep_ops = {
	.enable = mtu3_gadget_ep_enable,
	.disable = mtu3_gadget_ep_disable,
	.alloc_request = mtu3_alloc_request,
	.free_request = mtu3_free_request,
	.queue = mtu3_gadget_queue,
	.dequeue = mtu3_gadget_dequeue,
	.set_halt = mtu3_gadget_ep_set_halt,
	.set_wedge = mtu3_gadget_ep_set_wedge,
};

static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
{
	struct mtu3 *mtu = gadget_to_mtu3(gadget);

	return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
}

static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
{
	struct mtu3 *mtu = gadget_to_mtu3(gadget);
	unsigned long flags;

	dev_dbg(mtu->dev, "%s\n", __func__);

	/* remote wakeup feature is not enabled by host */
	if (!mtu->may_wakeup)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&mtu->lock, flags);
	if (mtu->g.speed >= USB_SPEED_SUPER) {
		mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
	} else {
		mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
		spin_unlock_irqrestore(&mtu->lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&mtu->lock, flags);
		mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
	}
	spin_unlock_irqrestore(&mtu->lock, flags);
	return 0;
}

static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget,
					int is_selfpowered)
{
	struct mtu3 *mtu = gadget_to_mtu3(gadget);

	mtu->is_self_powered = !!is_selfpowered;
	return 0;
}

static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mtu3 *mtu = gadget_to_mtu3(gadget);
	unsigned long flags;

	dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
		is_on ? "on" : "off", mtu->is_active ? "" : "in");

	/* we'd rather not pullup unless the device is active */
	spin_lock_irqsave(&mtu->lock, flags);

	is_on = !!is_on;
	if (!mtu->is_active) {
		/* save it for mtu3_start() to process the request */
		mtu->softconnect = is_on;
	} else if (is_on != mtu->softconnect) {
		mtu->softconnect = is_on;
		mtu3_dev_on_off(mtu, is_on);
	}

	spin_unlock_irqrestore(&mtu->lock, flags);

	return 0;
}

static int mtu3_gadget_start(struct usb_gadget *gadget,
			     struct usb_gadget_driver *driver)
{
	struct mtu3 *mtu = gadget_to_mtu3(gadget);
	unsigned long flags;

	if (mtu->gadget_driver) {
		dev_err(mtu->dev, "%s is already bound to %s\n",
			mtu->g.name, mtu->gadget_driver->driver.name);
		return -EBUSY;
	}

	dev_dbg(mtu->dev, "bind driver %s\n", driver->function);

	spin_lock_irqsave(&mtu->lock, flags);

	mtu->softconnect = 0;
	mtu->gadget_driver = driver;

	if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
		mtu3_start(mtu);

	spin_unlock_irqrestore(&mtu->lock, flags);

	return 0;
}

static void stop_activity(struct mtu3 *mtu)
{
	struct usb_gadget_driver *driver = mtu->gadget_driver;
	int i;

	/* don't disconnect if it's not connected */
	if (mtu->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		mtu->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (mtu->softconnect) {
		mtu->softconnect = 0;
		mtu3_dev_on_off(mtu, 0);
	}

	/*
	 * killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	nuke(mtu->ep0, -ESHUTDOWN);
	for (i = 1; i < mtu->num_eps; i++) {
		nuke(mtu->in_eps + i, -ESHUTDOWN);
		nuke(mtu->out_eps + i, -ESHUTDOWN);
	}

	if (driver) {
		spin_unlock(&mtu->lock);
		driver->disconnect(&mtu->g);
		spin_lock(&mtu->lock);
	}
}

static int mtu3_gadget_stop(struct usb_gadget *g)
{
	struct mtu3 *mtu = gadget_to_mtu3(g);
	unsigned long flags;

	dev_dbg(mtu->dev, "%s\n", __func__);

	spin_lock_irqsave(&mtu->lock, flags);

	stop_activity(mtu);
	mtu->gadget_driver = NULL;

	if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
		mtu3_stop(mtu);

	spin_unlock_irqrestore(&mtu->lock, flags);

	synchronize_irq(mtu->irq);
	return 0;
}

static void
mtu3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
{
	struct mtu3 *mtu = gadget_to_mtu3(g);
	unsigned long flags;

	dev_dbg(mtu->dev, "%s %s\n", __func__, usb_speed_string(speed));

	spin_lock_irqsave(&mtu->lock, flags);
	mtu3_set_speed(mtu, speed);
	spin_unlock_irqrestore(&mtu->lock, flags);
}

static const struct usb_gadget_ops mtu3_gadget_ops = {
	.get_frame = mtu3_gadget_get_frame,
	.wakeup = mtu3_gadget_wakeup,
	.set_selfpowered = mtu3_gadget_set_self_powered,
	.pullup = mtu3_gadget_pullup,
	.udc_start = mtu3_gadget_start,
	.udc_stop = mtu3_gadget_stop,
	.udc_set_speed = mtu3_gadget_set_speed,
};

static void mtu3_state_reset(struct mtu3 *mtu)
{
	mtu->address = 0;
	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
	mtu->may_wakeup = 0;
	mtu->u1_enable = 0;
	mtu->u2_enable = 0;
	mtu->delayed_status = false;
	mtu->test_mode = false;
}

static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
		       u32 epnum, u32 is_in)
{
	mep->epnum = epnum;
	mep->mtu = mtu;
	mep->is_in = is_in;

	INIT_LIST_HEAD(&mep->req_list);

	sprintf(mep->name, "ep%d%s", epnum,
		!epnum ? "" : (is_in ? "in" : "out"));

	mep->ep.name = mep->name;
	INIT_LIST_HEAD(&mep->ep.ep_list);

	/* initialize maxpacket as SS */
	if (!epnum) {
		usb_ep_set_maxpacket_limit(&mep->ep, 512);
		mep->ep.caps.type_control = true;
		mep->ep.ops = &mtu3_ep0_ops;
		mtu->g.ep0 = &mep->ep;
	} else {
		usb_ep_set_maxpacket_limit(&mep->ep, 1024);
		mep->ep.caps.type_iso = true;
		mep->ep.caps.type_bulk = true;
		mep->ep.caps.type_int = true;
		mep->ep.ops = &mtu3_ep_ops;
		list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list);
	}

	dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name,
		mep->ep.maxpacket);

	if (!epnum) {
		mep->ep.caps.dir_in = true;
		mep->ep.caps.dir_out = true;
	} else if (is_in) {
		mep->ep.caps.dir_in = true;
	} else {
		mep->ep.caps.dir_out = true;
	}
}

static void mtu3_gadget_init_eps(struct mtu3 *mtu)
{
	u8 epnum;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&mtu->g.ep_list);

	dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
		__func__, mtu->num_eps);

	init_hw_ep(mtu, mtu->ep0, 0, 0);
	for (epnum = 1; epnum < mtu->num_eps; epnum++) {
		init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1);
		init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0);
	}
}

int mtu3_gadget_setup(struct mtu3 *mtu)
{
	mtu->g.ops = &mtu3_gadget_ops;
	mtu->g.max_speed = mtu->max_speed;
	mtu->g.speed = USB_SPEED_UNKNOWN;
	mtu->g.sg_supported = 0;
	mtu->g.name = MTU3_DRIVER_NAME;
	mtu->is_active = 0;
	mtu->delayed_status = false;

	mtu3_gadget_init_eps(mtu);

	return usb_add_gadget_udc(mtu->dev, &mtu->g);
}

void mtu3_gadget_cleanup(struct mtu3 *mtu)
{
	usb_del_gadget_udc(&mtu->g);
}

void mtu3_gadget_resume(struct mtu3 *mtu)
{
	dev_dbg(mtu->dev, "gadget RESUME\n");
	if (mtu->gadget_driver && mtu->gadget_driver->resume) {
		spin_unlock(&mtu->lock);
		mtu->gadget_driver->resume(&mtu->g);
		spin_lock(&mtu->lock);
	}
}

/* called when SOF packets stop for 3+ msec or the link enters U3 */
void mtu3_gadget_suspend(struct mtu3 *mtu)
{
	dev_dbg(mtu->dev, "gadget SUSPEND\n");
	if (mtu->gadget_driver && mtu->gadget_driver->suspend) {
		spin_unlock(&mtu->lock);
		mtu->gadget_driver->suspend(&mtu->g);
		spin_lock(&mtu->lock);
	}
}

/* called when VBUS drops below session threshold, and in other cases */
void mtu3_gadget_disconnect(struct mtu3 *mtu)
{
	dev_dbg(mtu->dev, "gadget DISCONNECT\n");
	if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
		spin_unlock(&mtu->lock);
		mtu->gadget_driver->disconnect(&mtu->g);
		spin_lock(&mtu->lock);
	}

	mtu3_state_reset(mtu);
	usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
}

void mtu3_gadget_reset(struct mtu3 *mtu)
{
	dev_dbg(mtu->dev, "gadget RESET\n");

	/* report disconnect, if we didn't flush EP state */
	if (mtu->g.speed != USB_SPEED_UNKNOWN)
		mtu3_gadget_disconnect(mtu);
	else
		mtu3_state_reset(mtu);
}