1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * mtu3_gadget.c - MediaTek usb3 DRD peripheral support 4 * 5 * Copyright (C) 2016 MediaTek Inc. 6 * 7 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 8 */ 9 10 #include "mtu3.h" 11 12 void mtu3_req_complete(struct mtu3_ep *mep, 13 struct usb_request *req, int status) 14 __releases(mep->mtu->lock) 15 __acquires(mep->mtu->lock) 16 { 17 struct mtu3_request *mreq; 18 struct mtu3 *mtu; 19 int busy = mep->busy; 20 21 mreq = to_mtu3_request(req); 22 list_del(&mreq->list); 23 if (mreq->request.status == -EINPROGRESS) 24 mreq->request.status = status; 25 26 mtu = mreq->mtu; 27 mep->busy = 1; 28 spin_unlock(&mtu->lock); 29 30 /* ep0 makes use of PIO, needn't unmap it */ 31 if (mep->epnum) 32 usb_gadget_unmap_request(&mtu->g, req, mep->is_in); 33 34 dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", mep->name, 35 req, req->status, mreq->request.actual, mreq->request.length); 36 37 usb_gadget_giveback_request(&mep->ep, &mreq->request); 38 39 spin_lock(&mtu->lock); 40 mep->busy = busy; 41 } 42 43 static void nuke(struct mtu3_ep *mep, const int status) 44 { 45 struct mtu3_request *mreq = NULL; 46 47 mep->busy = 1; 48 if (list_empty(&mep->req_list)) 49 return; 50 51 dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status); 52 53 /* exclude EP0 */ 54 if (mep->epnum) 55 mtu3_qmu_flush(mep); 56 57 while (!list_empty(&mep->req_list)) { 58 mreq = list_first_entry(&mep->req_list, 59 struct mtu3_request, list); 60 mtu3_req_complete(mep, &mreq->request, status); 61 } 62 } 63 64 static int mtu3_ep_enable(struct mtu3_ep *mep) 65 { 66 const struct usb_endpoint_descriptor *desc; 67 const struct usb_ss_ep_comp_descriptor *comp_desc; 68 struct mtu3 *mtu = mep->mtu; 69 u32 interval = 0; 70 u32 mult = 0; 71 u32 burst = 0; 72 int max_packet; 73 int ret; 74 75 desc = mep->desc; 76 comp_desc = mep->comp_desc; 77 mep->type = usb_endpoint_type(desc); 78 max_packet = usb_endpoint_maxp(desc); 79 mep->maxp = max_packet & GENMASK(10, 0); 80 81 switch (mtu->g.speed) { 82 case USB_SPEED_SUPER: 83 case USB_SPEED_SUPER_PLUS: 84 if (usb_endpoint_xfer_int(desc) || 85 usb_endpoint_xfer_isoc(desc)) { 86 interval = desc->bInterval; 87 interval = clamp_val(interval, 1, 16) - 1; 88 if (usb_endpoint_xfer_isoc(desc) && comp_desc) 89 mult = comp_desc->bmAttributes; 90 } 91 if (comp_desc) 92 burst = comp_desc->bMaxBurst; 93 94 break; 95 case USB_SPEED_HIGH: 96 if (usb_endpoint_xfer_isoc(desc) || 97 usb_endpoint_xfer_int(desc)) { 98 interval = desc->bInterval; 99 interval = clamp_val(interval, 1, 16) - 1; 100 burst = (max_packet & GENMASK(12, 11)) >> 11; 101 } 102 break; 103 default: 104 break; /*others are ignored */ 105 } 106 107 dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n", 108 __func__, mep->maxp, interval, burst, mult); 109 110 mep->ep.maxpacket = mep->maxp; 111 mep->ep.desc = desc; 112 mep->ep.comp_desc = comp_desc; 113 114 /* slot mainly affects bulk/isoc transfer, so ignore int */ 115 mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot; 116 117 ret = mtu3_config_ep(mtu, mep, interval, burst, mult); 118 if (ret < 0) 119 return ret; 120 121 ret = mtu3_gpd_ring_alloc(mep); 122 if (ret < 0) { 123 mtu3_deconfig_ep(mtu, mep); 124 return ret; 125 } 126 127 mtu3_qmu_start(mep); 128 129 return 0; 130 } 131 132 static int mtu3_ep_disable(struct mtu3_ep *mep) 133 { 134 struct mtu3 *mtu = mep->mtu; 135 136 mtu3_qmu_stop(mep); 137 138 /* abort all pending requests */ 139 nuke(mep, -ESHUTDOWN); 140 mtu3_deconfig_ep(mtu, mep); 141 mtu3_gpd_ring_free(mep); 142 143 mep->desc = NULL; 144 mep->ep.desc = NULL; 145 mep->comp_desc = NULL; 146 mep->type = 0; 147 mep->flags = 0; 148 149 return 0; 150 } 151 152 static int mtu3_gadget_ep_enable(struct usb_ep *ep, 153 const struct usb_endpoint_descriptor *desc) 154 { 155 struct mtu3_ep *mep; 156 struct mtu3 *mtu; 157 unsigned long flags; 158 int ret = -EINVAL; 159 160 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 161 pr_debug("%s invalid parameters\n", __func__); 162 return -EINVAL; 163 } 164 165 if (!desc->wMaxPacketSize) { 166 pr_debug("%s missing wMaxPacketSize\n", __func__); 167 return -EINVAL; 168 } 169 mep = to_mtu3_ep(ep); 170 mtu = mep->mtu; 171 172 /* check ep number and direction against endpoint */ 173 if (usb_endpoint_num(desc) != mep->epnum) 174 return -EINVAL; 175 176 if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in) 177 return -EINVAL; 178 179 dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name); 180 181 if (mep->flags & MTU3_EP_ENABLED) { 182 dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n", 183 mep->name); 184 return 0; 185 } 186 187 spin_lock_irqsave(&mtu->lock, flags); 188 mep->desc = desc; 189 mep->comp_desc = ep->comp_desc; 190 191 ret = mtu3_ep_enable(mep); 192 if (ret) 193 goto error; 194 195 mep->busy = 0; 196 mep->wedged = 0; 197 mep->flags |= MTU3_EP_ENABLED; 198 mtu->active_ep++; 199 200 error: 201 spin_unlock_irqrestore(&mtu->lock, flags); 202 203 dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep); 204 205 return ret; 206 } 207 208 static int mtu3_gadget_ep_disable(struct usb_ep *ep) 209 { 210 struct mtu3_ep *mep = to_mtu3_ep(ep); 211 struct mtu3 *mtu = mep->mtu; 212 unsigned long flags; 213 214 dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name); 215 216 if (!(mep->flags & MTU3_EP_ENABLED)) { 217 dev_warn(mtu->dev, "%s is already disabled\n", mep->name); 218 return 0; 219 } 220 221 spin_lock_irqsave(&mtu->lock, flags); 222 mtu3_ep_disable(mep); 223 mep->flags &= ~MTU3_EP_ENABLED; 224 mtu->active_ep--; 225 spin_unlock_irqrestore(&(mtu->lock), flags); 226 227 dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n", 228 __func__, mtu->active_ep, mtu->is_active); 229 230 return 0; 231 } 232 233 struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) 234 { 235 struct mtu3_ep *mep = to_mtu3_ep(ep); 236 struct mtu3_request *mreq; 237 238 mreq = kzalloc(sizeof(*mreq), gfp_flags); 239 if (!mreq) 240 return NULL; 241 242 mreq->request.dma = DMA_ADDR_INVALID; 243 mreq->epnum = mep->epnum; 244 mreq->mep = mep; 245 246 return &mreq->request; 247 } 248 249 void mtu3_free_request(struct usb_ep *ep, struct usb_request *req) 250 { 251 kfree(to_mtu3_request(req)); 252 } 253 254 static int mtu3_gadget_queue(struct usb_ep *ep, 255 struct usb_request *req, gfp_t gfp_flags) 256 { 257 struct mtu3_ep *mep; 258 struct mtu3_request *mreq; 259 struct mtu3 *mtu; 260 unsigned long flags; 261 int ret = 0; 262 263 if (!ep || !req) 264 return -EINVAL; 265 266 if (!req->buf) 267 return -ENODATA; 268 269 mep = to_mtu3_ep(ep); 270 mtu = mep->mtu; 271 mreq = to_mtu3_request(req); 272 mreq->mtu = mtu; 273 274 if (mreq->mep != mep) 275 return -EINVAL; 276 277 dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n", 278 __func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name, 279 mreq, ep->maxpacket, mreq->request.length); 280 281 if (req->length > GPD_BUF_SIZE) { 282 dev_warn(mtu->dev, 283 "req length > supported MAX:%d requested:%d\n", 284 GPD_BUF_SIZE, req->length); 285 return -EOPNOTSUPP; 286 } 287 288 /* don't queue if the ep is down */ 289 if (!mep->desc) { 290 dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n", 291 req, ep->name); 292 return -ESHUTDOWN; 293 } 294 295 mreq->request.actual = 0; 296 mreq->request.status = -EINPROGRESS; 297 298 ret = usb_gadget_map_request(&mtu->g, req, mep->is_in); 299 if (ret) { 300 dev_err(mtu->dev, "dma mapping failed\n"); 301 return ret; 302 } 303 304 spin_lock_irqsave(&mtu->lock, flags); 305 306 if (mtu3_prepare_transfer(mep)) { 307 ret = -EAGAIN; 308 goto error; 309 } 310 311 list_add_tail(&mreq->list, &mep->req_list); 312 mtu3_insert_gpd(mep, mreq); 313 mtu3_qmu_resume(mep); 314 315 error: 316 spin_unlock_irqrestore(&mtu->lock, flags); 317 318 return ret; 319 } 320 321 static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req) 322 { 323 struct mtu3_ep *mep = to_mtu3_ep(ep); 324 struct mtu3_request *mreq = to_mtu3_request(req); 325 struct mtu3_request *r; 326 unsigned long flags; 327 int ret = 0; 328 struct mtu3 *mtu = mep->mtu; 329 330 if (!ep || !req || mreq->mep != mep) 331 return -EINVAL; 332 333 dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req); 334 335 spin_lock_irqsave(&mtu->lock, flags); 336 337 list_for_each_entry(r, &mep->req_list, list) { 338 if (r == mreq) 339 break; 340 } 341 if (r != mreq) { 342 dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name); 343 ret = -EINVAL; 344 goto done; 345 } 346 347 mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */ 348 mtu3_req_complete(mep, req, -ECONNRESET); 349 mtu3_qmu_start(mep); 350 351 done: 352 spin_unlock_irqrestore(&mtu->lock, flags); 353 354 return ret; 355 } 356 357 /* 358 * Set or clear the halt bit of an EP. 359 * A halted EP won't TX/RX any data but will queue requests. 360 */ 361 static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value) 362 { 363 struct mtu3_ep *mep = to_mtu3_ep(ep); 364 struct mtu3 *mtu = mep->mtu; 365 struct mtu3_request *mreq; 366 unsigned long flags; 367 int ret = 0; 368 369 if (!ep) 370 return -EINVAL; 371 372 dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name); 373 374 spin_lock_irqsave(&mtu->lock, flags); 375 376 if (mep->type == USB_ENDPOINT_XFER_ISOC) { 377 ret = -EINVAL; 378 goto done; 379 } 380 381 mreq = next_request(mep); 382 if (value) { 383 /* 384 * If there is not request for TX-EP, QMU will not transfer 385 * data to TX-FIFO, so no need check whether TX-FIFO 386 * holds bytes or not here 387 */ 388 if (mreq) { 389 dev_dbg(mtu->dev, "req in progress, cannot halt %s\n", 390 ep->name); 391 ret = -EAGAIN; 392 goto done; 393 } 394 } else { 395 mep->wedged = 0; 396 } 397 398 dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear"); 399 400 mtu3_ep_stall_set(mep, value); 401 402 done: 403 spin_unlock_irqrestore(&mtu->lock, flags); 404 405 return ret; 406 } 407 408 /* Sets the halt feature with the clear requests ignored */ 409 static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep) 410 { 411 struct mtu3_ep *mep = to_mtu3_ep(ep); 412 413 if (!ep) 414 return -EINVAL; 415 416 mep->wedged = 1; 417 418 return usb_ep_set_halt(ep); 419 } 420 421 static const struct usb_ep_ops mtu3_ep_ops = { 422 .enable = mtu3_gadget_ep_enable, 423 .disable = mtu3_gadget_ep_disable, 424 .alloc_request = mtu3_alloc_request, 425 .free_request = mtu3_free_request, 426 .queue = mtu3_gadget_queue, 427 .dequeue = mtu3_gadget_dequeue, 428 .set_halt = mtu3_gadget_ep_set_halt, 429 .set_wedge = mtu3_gadget_ep_set_wedge, 430 }; 431 432 static int mtu3_gadget_get_frame(struct usb_gadget *gadget) 433 { 434 struct mtu3 *mtu = gadget_to_mtu3(gadget); 435 436 return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM); 437 } 438 439 static int mtu3_gadget_wakeup(struct usb_gadget *gadget) 440 { 441 struct mtu3 *mtu = gadget_to_mtu3(gadget); 442 unsigned long flags; 443 444 dev_dbg(mtu->dev, "%s\n", __func__); 445 446 /* remote wakeup feature is not enabled by host */ 447 if (!mtu->may_wakeup) 448 return -EOPNOTSUPP; 449 450 spin_lock_irqsave(&mtu->lock, flags); 451 if (mtu->g.speed >= USB_SPEED_SUPER) { 452 mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT); 453 } else { 454 mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); 455 spin_unlock_irqrestore(&mtu->lock, flags); 456 usleep_range(10000, 11000); 457 spin_lock_irqsave(&mtu->lock, flags); 458 mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); 459 } 460 spin_unlock_irqrestore(&mtu->lock, flags); 461 return 0; 462 } 463 464 static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget, 465 int is_selfpowered) 466 { 467 struct mtu3 *mtu = gadget_to_mtu3(gadget); 468 469 mtu->is_self_powered = !!is_selfpowered; 470 return 0; 471 } 472 473 static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on) 474 { 475 struct mtu3 *mtu = gadget_to_mtu3(gadget); 476 unsigned long flags; 477 478 dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__, 479 is_on ? "on" : "off", mtu->is_active ? "" : "in"); 480 481 /* we'd rather not pullup unless the device is active. */ 482 spin_lock_irqsave(&mtu->lock, flags); 483 484 is_on = !!is_on; 485 if (!mtu->is_active) { 486 /* save it for mtu3_start() to process the request */ 487 mtu->softconnect = is_on; 488 } else if (is_on != mtu->softconnect) { 489 mtu->softconnect = is_on; 490 mtu3_dev_on_off(mtu, is_on); 491 } 492 493 spin_unlock_irqrestore(&mtu->lock, flags); 494 495 return 0; 496 } 497 498 static int mtu3_gadget_start(struct usb_gadget *gadget, 499 struct usb_gadget_driver *driver) 500 { 501 struct mtu3 *mtu = gadget_to_mtu3(gadget); 502 unsigned long flags; 503 504 if (mtu->gadget_driver) { 505 dev_err(mtu->dev, "%s is already bound to %s\n", 506 mtu->g.name, mtu->gadget_driver->driver.name); 507 return -EBUSY; 508 } 509 510 dev_dbg(mtu->dev, "bind driver %s\n", driver->function); 511 512 spin_lock_irqsave(&mtu->lock, flags); 513 514 mtu->softconnect = 0; 515 mtu->gadget_driver = driver; 516 517 if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL) 518 mtu3_start(mtu); 519 520 spin_unlock_irqrestore(&mtu->lock, flags); 521 522 return 0; 523 } 524 525 static void stop_activity(struct mtu3 *mtu) 526 { 527 struct usb_gadget_driver *driver = mtu->gadget_driver; 528 int i; 529 530 /* don't disconnect if it's not connected */ 531 if (mtu->g.speed == USB_SPEED_UNKNOWN) 532 driver = NULL; 533 else 534 mtu->g.speed = USB_SPEED_UNKNOWN; 535 536 /* deactivate the hardware */ 537 if (mtu->softconnect) { 538 mtu->softconnect = 0; 539 mtu3_dev_on_off(mtu, 0); 540 } 541 542 /* 543 * killing any outstanding requests will quiesce the driver; 544 * then report disconnect 545 */ 546 nuke(mtu->ep0, -ESHUTDOWN); 547 for (i = 1; i < mtu->num_eps; i++) { 548 nuke(mtu->in_eps + i, -ESHUTDOWN); 549 nuke(mtu->out_eps + i, -ESHUTDOWN); 550 } 551 552 if (driver) { 553 spin_unlock(&mtu->lock); 554 driver->disconnect(&mtu->g); 555 spin_lock(&mtu->lock); 556 } 557 } 558 559 static int mtu3_gadget_stop(struct usb_gadget *g) 560 { 561 struct mtu3 *mtu = gadget_to_mtu3(g); 562 unsigned long flags; 563 564 dev_dbg(mtu->dev, "%s\n", __func__); 565 566 spin_lock_irqsave(&mtu->lock, flags); 567 568 stop_activity(mtu); 569 mtu->gadget_driver = NULL; 570 571 if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL) 572 mtu3_stop(mtu); 573 574 spin_unlock_irqrestore(&mtu->lock, flags); 575 576 return 0; 577 } 578 579 static const struct usb_gadget_ops mtu3_gadget_ops = { 580 .get_frame = mtu3_gadget_get_frame, 581 .wakeup = mtu3_gadget_wakeup, 582 .set_selfpowered = mtu3_gadget_set_self_powered, 583 .pullup = mtu3_gadget_pullup, 584 .udc_start = mtu3_gadget_start, 585 .udc_stop = mtu3_gadget_stop, 586 }; 587 588 static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep, 589 u32 epnum, u32 is_in) 590 { 591 mep->epnum = epnum; 592 mep->mtu = mtu; 593 mep->is_in = is_in; 594 595 INIT_LIST_HEAD(&mep->req_list); 596 597 sprintf(mep->name, "ep%d%s", epnum, 598 !epnum ? "" : (is_in ? "in" : "out")); 599 600 mep->ep.name = mep->name; 601 INIT_LIST_HEAD(&mep->ep.ep_list); 602 603 /* initialize maxpacket as SS */ 604 if (!epnum) { 605 usb_ep_set_maxpacket_limit(&mep->ep, 512); 606 mep->ep.caps.type_control = true; 607 mep->ep.ops = &mtu3_ep0_ops; 608 mtu->g.ep0 = &mep->ep; 609 } else { 610 usb_ep_set_maxpacket_limit(&mep->ep, 1024); 611 mep->ep.caps.type_iso = true; 612 mep->ep.caps.type_bulk = true; 613 mep->ep.caps.type_int = true; 614 mep->ep.ops = &mtu3_ep_ops; 615 list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list); 616 } 617 618 dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name, 619 mep->ep.maxpacket); 620 621 if (!epnum) { 622 mep->ep.caps.dir_in = true; 623 mep->ep.caps.dir_out = true; 624 } else if (is_in) { 625 mep->ep.caps.dir_in = true; 626 } else { 627 mep->ep.caps.dir_out = true; 628 } 629 } 630 631 static void mtu3_gadget_init_eps(struct mtu3 *mtu) 632 { 633 u8 epnum; 634 635 /* initialize endpoint list just once */ 636 INIT_LIST_HEAD(&(mtu->g.ep_list)); 637 638 dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n", 639 __func__, mtu->num_eps); 640 641 init_hw_ep(mtu, mtu->ep0, 0, 0); 642 for (epnum = 1; epnum < mtu->num_eps; epnum++) { 643 init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1); 644 init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0); 645 } 646 } 647 648 int mtu3_gadget_setup(struct mtu3 *mtu) 649 { 650 int ret; 651 652 mtu->g.ops = &mtu3_gadget_ops; 653 mtu->g.max_speed = mtu->max_speed; 654 mtu->g.speed = USB_SPEED_UNKNOWN; 655 mtu->g.sg_supported = 0; 656 mtu->g.name = MTU3_DRIVER_NAME; 657 mtu->is_active = 0; 658 mtu->delayed_status = false; 659 660 mtu3_gadget_init_eps(mtu); 661 662 ret = usb_add_gadget_udc(mtu->dev, &mtu->g); 663 if (ret) { 664 dev_err(mtu->dev, "failed to register udc\n"); 665 return ret; 666 } 667 668 usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); 669 670 return 0; 671 } 672 673 void mtu3_gadget_cleanup(struct mtu3 *mtu) 674 { 675 usb_del_gadget_udc(&mtu->g); 676 } 677 678 void mtu3_gadget_resume(struct mtu3 *mtu) 679 { 680 dev_dbg(mtu->dev, "gadget RESUME\n"); 681 if (mtu->gadget_driver && mtu->gadget_driver->resume) { 682 spin_unlock(&mtu->lock); 683 mtu->gadget_driver->resume(&mtu->g); 684 spin_lock(&mtu->lock); 685 } 686 } 687 688 /* called when SOF packets stop for 3+ msec or enters U3 */ 689 void mtu3_gadget_suspend(struct mtu3 *mtu) 690 { 691 dev_dbg(mtu->dev, "gadget SUSPEND\n"); 692 if (mtu->gadget_driver && mtu->gadget_driver->suspend) { 693 spin_unlock(&mtu->lock); 694 mtu->gadget_driver->suspend(&mtu->g); 695 spin_lock(&mtu->lock); 696 } 697 } 698 699 /* called when VBUS drops below session threshold, and in other cases */ 700 void mtu3_gadget_disconnect(struct mtu3 *mtu) 701 { 702 dev_dbg(mtu->dev, "gadget DISCONNECT\n"); 703 if (mtu->gadget_driver && mtu->gadget_driver->disconnect) { 704 spin_unlock(&mtu->lock); 705 mtu->gadget_driver->disconnect(&mtu->g); 706 spin_lock(&mtu->lock); 707 } 708 709 usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); 710 } 711 712 void mtu3_gadget_reset(struct mtu3 *mtu) 713 { 714 dev_dbg(mtu->dev, "gadget RESET\n"); 715 716 /* report disconnect, if we didn't flush EP state */ 717 if (mtu->g.speed != USB_SPEED_UNKNOWN) 718 mtu3_gadget_disconnect(mtu); 719 720 mtu->address = 0; 721 mtu->ep0_state = MU3D_EP0_STATE_SETUP; 722 mtu->may_wakeup = 0; 723 mtu->u1_enable = 0; 724 mtu->u2_enable = 0; 725 mtu->delayed_status = false; 726 } 727