// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - DesignWare USB3 DRD Controller Core file
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/bitfield.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#include "debug.h"

#define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */

/**
 * dwc3_get_dr_mode - Validates and sets dr_mode
 * @dwc: pointer to our context structure
 */
static int dwc3_get_dr_mode(struct dwc3 *dwc)
{
	enum usb_dr_mode mode;
	struct device *dev = dwc->dev;
	unsigned int hw_mode;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	mode = dwc->dr_mode;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	switch (hw_mode) {
	case DWC3_GHWPARAMS0_MODE_GADGET:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
			dev_err(dev,
				"Controller does not support host mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_PERIPHERAL;
		break;
	case DWC3_GHWPARAMS0_MODE_HOST:
		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
			dev_err(dev,
				"Controller does not support device mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_HOST;
		break;
	default:
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
			mode = USB_DR_MODE_HOST;
		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
			mode = USB_DR_MODE_PERIPHERAL;

		/*
		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
		 * mode. If the controller supports DRD but the dr_mode is not
		 * specified or set to OTG, then set the mode to peripheral.
		 */
		if (mode == USB_DR_MODE_OTG &&
		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
			mode = USB_DR_MODE_PERIPHERAL;
	}

	if (mode != dwc->dr_mode) {
		dev_warn(dev,
			 "Configuration mismatch. dr_mode forced to %s\n",
			 mode == USB_DR_MODE_HOST ?
"host" : "gadget"); 99 100 dwc->dr_mode = mode; 101 } 102 103 return 0; 104 } 105 106 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) 107 { 108 u32 reg; 109 110 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 111 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); 112 reg |= DWC3_GCTL_PRTCAPDIR(mode); 113 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 114 115 dwc->current_dr_role = mode; 116 } 117 118 static void __dwc3_set_mode(struct work_struct *work) 119 { 120 struct dwc3 *dwc = work_to_dwc(work); 121 unsigned long flags; 122 int ret; 123 u32 reg; 124 125 mutex_lock(&dwc->mutex); 126 127 pm_runtime_get_sync(dwc->dev); 128 129 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) 130 dwc3_otg_update(dwc, 0); 131 132 if (!dwc->desired_dr_role) 133 goto out; 134 135 if (dwc->desired_dr_role == dwc->current_dr_role) 136 goto out; 137 138 if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) 139 goto out; 140 141 switch (dwc->current_dr_role) { 142 case DWC3_GCTL_PRTCAP_HOST: 143 dwc3_host_exit(dwc); 144 break; 145 case DWC3_GCTL_PRTCAP_DEVICE: 146 dwc3_gadget_exit(dwc); 147 dwc3_event_buffers_cleanup(dwc); 148 break; 149 case DWC3_GCTL_PRTCAP_OTG: 150 dwc3_otg_exit(dwc); 151 spin_lock_irqsave(&dwc->lock, flags); 152 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE; 153 spin_unlock_irqrestore(&dwc->lock, flags); 154 dwc3_otg_update(dwc, 1); 155 break; 156 default: 157 break; 158 } 159 160 /* For DRD host or device mode only */ 161 if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) { 162 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 163 reg |= DWC3_GCTL_CORESOFTRESET; 164 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 165 166 /* 167 * Wait for internal clocks to synchronized. DWC_usb31 and 168 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To 169 * keep it consistent across different IPs, let's wait up to 170 * 100ms before clearing GCTL.CORESOFTRESET. 
		 */
		msleep(100);

		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg &= ~DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_set_prtcap(dwc, dwc->desired_dr_role);

	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (dwc->desired_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dwc->dev, "failed to initialize host\n");
		} else {
			if (dwc->usb2_phy)
				otg_set_vbus(dwc->usb2_phy->otg, true);
			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
			phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
			if (dwc->dis_split_quirk) {
				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
				reg |= DWC3_GUCTL3_SPLITDISABLE;
				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
			}
		}
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			dev_err(dwc->dev, "failed to initialize peripheral\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_init(dwc);
		dwc3_otg_update(dwc, 0);
		break;
	default:
		break;
	}

out:
	pm_runtime_mark_last_busy(dwc->dev);
	pm_runtime_put_autosuspend(dwc->dev);
	mutex_unlock(&dwc->mutex);
}

void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
	unsigned long flags;

	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->desired_dr_role = mode;
	spin_unlock_irqrestore(&dwc->lock, flags);

	queue_work(system_freezable_wq, &dwc->drd_work);
}

u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
			DWC3_GDBGFIFOSPACE_NUM(dep->number) |
			DWC3_GDBGFIFOSPACE_TYPE(type));

	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);

	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
}

/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 */
int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32 reg;
	int retries = 1000;

	/*
	 * We're resetting only the device side because, if we're in host mode,
	 * XHCI driver will reset the host block. If dwc3 was configured for
	 * host-only mode, then we can return early.
	 */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg |= DWC3_DCTL_CSFTRST;
	reg &= ~DWC3_DCTL_RUN_STOP;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFTRST bit
	 * is cleared only after all the clocks are synchronized. This can
	 * take a little more than 50ms. Set the polling rate at 20ms
	 * for 10 times instead.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
		retries = 10;

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			goto done;

		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
			msleep(20);
		else
			udelay(1);
	} while (--retries);

	return -ETIMEDOUT;

done:
	/*
	 * For DWC_usb31 controller 1.80a and prior, once the DCTL.CSFTRST bit
	 * is cleared, we must wait at least 50ms before accessing the PHY
	 * domain (synchronization delay).
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
		msleep(50);

	return 0;
}

/*
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
	u32 reg;
	u32 dft;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	if (dwc->fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
	if (dft != dwc->fladj) {
		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
	}
}

/**
 * dwc3_ref_clk_period - Reference clock period configuration
 *		The default reference clock period depends on the hardware
 *		configuration. For systems with a reference clock that differs
 *		from the default, this will set the clock period in the
 *		DWC3_GUCTL register.
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
	unsigned long period;
	unsigned long fladj;
	unsigned long decr;
	unsigned long rate;
	u32 reg;

	if (dwc->ref_clk) {
		rate = clk_get_rate(dwc->ref_clk);
		if (!rate)
			return;
		period = NSEC_PER_SEC / rate;
	} else if (dwc->ref_clk_per) {
		period = dwc->ref_clk_per;
		rate = NSEC_PER_SEC / period;
	} else {
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
	reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/*
	 * The calculation below is
	 *
	 *     125000 * (NSEC_PER_SEC / (rate * period) - 1)
	 *
	 * but rearranged for fixed-point arithmetic. The division must be
	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
	 * neither does rate * period).
	 *
	 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
	 * nanoseconds of error caused by the truncation which happened during
	 * the division when calculating rate or period (whichever one was
	 * derived from the other). We first calculate the relative error, then
	 * scale it to units of 8 ppm.
	 */
	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
	fladj -= 125000;
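
	/*
	 * Illustrative (hypothetical) worked example, assuming a 24 MHz
	 * reference clock rather than any particular board: period is
	 * 1000000000 / 24000000 = 41 ns (truncated), so rate * period is
	 * 984000000. Then fladj = 125000000000000 / 984000000 - 125000
	 * = 127032 - 125000 = 2032, and decr below is 480000000 / 24000000
	 * = 20, giving GFLADJ.240MHZDECR = 10 with the PLS1 bit clear.
	 */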
	/*
	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
	 */
	decr = 480000000 / rate;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
	     & ~DWC3_GFLADJ_240MHZDECR
	     & ~DWC3_GFLADJ_240MHZDECR_PLS1;
	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
	     | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
	     | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}

/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}

/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Returns a pointer to the allocated event buffer structure on success,
 * otherwise ERR_PTR(errno).
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned length)
{
	struct dwc3_event_buffer *evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc = dwc;
	evt->length = length;
	evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
	if (!evt->cache)
		return ERR_PTR(-ENOMEM);

	evt->buf = dma_alloc_coherent(dwc->sysdev, length,
			&evt->dma, GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}

/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	if (evt)
		dwc3_free_one_event_buffer(dwc, evt);
}

/**
 * dwc3_alloc_event_buffers - Allocates one event buffer of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
	struct dwc3_event_buffer *evt;

	evt = dwc3_alloc_one_event_buffer(dwc, length);
	if (IS_ERR(evt)) {
		dev_err(dwc->dev, "can't allocate event buffer\n");
		return PTR_ERR(evt);
	}
	dwc->ev_buf = evt;

	return 0;
}

/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);

	return 0;
}

void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;

	evt->lpos = 0;

	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
			| DWC3_GEVNTSIZ_SIZE(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
}

static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
{
	if (!dwc->has_hibernation)
		return 0;

	if (!dwc->nr_scratch)
		return 0;

	dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
			DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
	if (!dwc->scratchbuf)
		return -ENOMEM;

	return 0;
}

static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
{
	dma_addr_t scratch_addr;
	u32 param;
	int ret;

	if (!dwc->has_hibernation)
		return 0;

	if (!dwc->nr_scratch)
		return 0;

	/* should never get here without a scratch buffer allocated */
	if (WARN_ON(!dwc->scratchbuf))
		return 0;

	scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
			dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
			DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
		dev_err(dwc->sysdev, "failed to map scratch buffer\n");
		ret = -EFAULT;
		goto err0;
	}

	dwc->scratch_addr = scratch_addr;

	param = lower_32_bits(scratch_addr);

	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
	if (ret < 0)
		goto err1;

	param = upper_32_bits(scratch_addr);

	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
	if (ret < 0)
		goto err1;

	return 0;

err1:
	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
			 DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);

err0:
	return ret;
}

static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
{
	if (!dwc->has_hibernation)
		return;

	if (!dwc->nr_scratch)
		return;

	/* should never get here without a scratch buffer allocated */
	if (WARN_ON(!dwc->scratchbuf))
		return;

	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
			 DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
	kfree(dwc->scratchbuf);
}

static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	dwc->num_eps = DWC3_NUM_EPS(parms);
}

static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);

	if (DWC3_IP_IS(DWC32))
		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
}

static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
	int intf;
	int ret = 0;

	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);

	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
	     dwc->hsphy_interface &&
	     !strncmp(dwc->hsphy_interface, "ulpi", 4)))
		ret = dwc3_ulpi_init(dwc);

	return ret;
}

/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
 */
static int dwc3_phy_setup(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	/*
	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
	 * PHYs. Also, this bit is not supposed to be used in normal operation.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;

	/*
	 * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
	 * to '0' during coreConsultant configuration. So default value
	 * will be '0' when the core is reset. Application needs to set it
	 * to '1' after the core initialization is completed.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;

	/*
	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
	 * power-on reset, and it can be set after core initialization, which is
	 * after device soft-reset during initialization.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_u3_susphy_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
		    !strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
			   !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
			DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
			DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
			DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
			DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/*
	 * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
	 * '0' during coreConsultant configuration. So default value will
	 * be '0' when the core is reset. Application needs to set it to
	 * '1' after the core initialization is completed.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;

	/*
	 * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
	 * power-on reset, and it can be set after core initialization, which is
	 * after device soft-reset during initialization.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_u2_susphy_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

	return 0;
}

static int dwc3_clk_enable(struct dwc3 *dwc)
{
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	return 0;

disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}

static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}

static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);

	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	phy_power_off(dwc->usb2_generic_phy);
	phy_power_off(dwc->usb3_generic_phy);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}

static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	dwc->ip = DWC3_GSNPS_ID(reg);

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		dwc->revision = reg;
	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
	} else {
		return false;
	}

	return true;
}

static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	u32 hwparams4 = dwc->hwparams.hwparams4;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;

	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/**
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
		     dwc->dr_mode == USB_DR_MODE_OTG) &&
		    DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/* enable hibernation here */
		dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);

		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);

/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle the "snps,incr-burst-type-adjustment" property.
	 * Get the number of values in this property:
	 * result <= 0, means this property is not supported.
	 * result = 1, means INCRx burst mode supported.
	 * result > 1, means undefined length burst mode supported.
	 */
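	/*
	 * Illustrative, hypothetical device-tree values (not taken from any
	 * specific board): a single-cell property such as
	 *   snps,incr-burst-type-adjustment = <16>;
	 * selects INCRx burst mode with INCR16, while a multi-cell property
	 * such as
	 *   snps,incr-burst-type-adjustment = <4>, <8>, <16>;
	 * selects undefined length burst mode capped at the largest listed
	 * size (16 here), matching the parsing logic below.
	 */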
	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals) {
		dev_err(dev, "failed to allocate memory for burst values\n");
		return;
	}

	/* Get INCR burst type, and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "failed to read burst type property\n");
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}

/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;
	int ret;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	/*
	 * Write Linux Version Code to our GUID register so it's easy to figure
	 * out in which kernel version a bug was found.
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		goto err0;

	if (!dwc->ulpi_ready) {
		ret = dwc3_core_ulpi_init(dwc);
		if (ret)
			goto err0;
		dwc->ulpi_ready = true;
	}

	if (!dwc->phys_ready) {
		ret = dwc3_core_get_phy(dwc);
		if (ret)
			goto err0a;
		dwc->phys_ready = true;
	}

	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);
	ret = phy_init(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err0a;

	ret = phy_init(dwc->usb3_generic_phy);
	if (ret < 0) {
		phy_exit(dwc->usb2_generic_phy);
		goto err0a;
	}

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err1;

	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
	    !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
		if (!dwc->dis_u3_susphy_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
			reg |= DWC3_GUSB3PIPECTL_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
		}

		if (!dwc->dis_u2_susphy_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
			reg |= DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		}
	}

	dwc3_core_setup_global_control(dwc);
	dwc3_core_num_eps(dwc);

	ret = dwc3_setup_scratch_buffers(dwc);
	if (ret)
		goto err1;

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc);

	/* Adjust Reference Clock Period */
	dwc3_ref_clk_period(dwc);

	dwc3_set_incr_burst_type(dwc);

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);
	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err2;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err3;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err4;
	}

	/*
	 * ENDXFER polling is available on version 3.10a and later of
	 * the DWC_usb3 controller. It is NOT available in the
	 * DWC_usb31 controller.
	 */
	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}

	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);

		/*
		 * Enable hardware control of sending remote wakeup
		 * in HS when the device is in the L1 state.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;

		/*
		 * Decouple USB 2.0 L1 & L2 events, which allows the gadget
		 * driver to only receive U3/L2 suspend & wakeup events and
		 * prevents the more frequent L1 LPM transitions from
		 * interrupting the driver.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;

		if (dwc->dis_tx_ipgap_linecheck_quirk)
			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;

		if (dwc->parkmode_disable_ss_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;

		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
		    (dwc->maximum_speed == USB_SPEED_HIGH ||
		     dwc->maximum_speed == USB_SPEED_FULL))
			reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;

		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}

	if (dwc->dr_mode == USB_DR_MODE_HOST ||
	    dwc->dr_mode == USB_DR_MODE_OTG) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL);

		/*
		 * Enable the Auto Retry feature so that the controller,
		 * operating in host mode, replies to IN transfers that see
		 * transaction errors (CRC errors or internal overrun
		 * scenarios) with a non-terminating retry ACK (i.e. an ACK
		 * transaction packet with Retry = 1 and Nump != 0).
		 */
		reg |= DWC3_GUCTL_HSTINAUTORETRY;

		dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
	}

	/*
	 * Must config both number of packets and max burst settings to enable
	 * RX and/or TX threshold.
	 */
	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
		u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
		u8 rx_maxburst = dwc->rx_max_burst_prd;
		u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
		u8 tx_maxburst = dwc->tx_max_burst_prd;

		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_RXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);

			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}

		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_TXTHRNUMPKTSEL_PRD;

			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);

			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);

			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}

	return 0;

err4:
	phy_power_off(dwc->usb3_generic_phy);

err3:
	phy_power_off(dwc->usb2_generic_phy);

err2:
	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);

err1:
	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

err0a:
	dwc3_ulpi_exit(dwc);

err0:
	return ret;
}

static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	int ret;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb2_phy = NULL;
		} else {
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
		}
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb3_phy = NULL;
		} else {
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
		}
	}

	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(dwc->usb2_generic_phy)) {
		ret = PTR_ERR(dwc->usb2_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb2_generic_phy = NULL;
		} else {
			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
		}
	}

	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(dwc->usb3_generic_phy)) {
		ret = PTR_ERR(dwc->usb3_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb3_generic_phy = NULL;
		} else {
			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
		}
	}

	return 0;
}

static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, true);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);

		ret = dwc3_host_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize host\n");
		break;
	case USB_DR_MODE_OTG:
		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
		ret = dwc3_drd_init(dwc);
		if (ret)
			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
		break;
	default:
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		return -EINVAL;
	}

	return 0;
}

static void dwc3_core_exit_mode(struct dwc3 *dwc)
{
	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_gadget_exit(dwc);
		break;
	case USB_DR_MODE_HOST:
		dwc3_host_exit(dwc);
		break;
	case USB_DR_MODE_OTG:
		dwc3_drd_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	/* de-assert DRVVBUS for HOST and OTG mode */
	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}

static void dwc3_get_properties(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u8 rx_thr_num_pkt_prd = 0;
	u8 rx_max_burst_prd = 0;
	u8 tx_thr_num_pkt_prd = 0;
	u8 tx_max_burst_prd = 0;
	u8 tx_fifo_resize_max_num;
	const char *usb_psy_name;
	int ret;

	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xf;

	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;

	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;

	/*
	 * default to a TXFIFO size large enough to fit 6 max packets. This
	 * allows for systems with larger bus latencies to have some headroom
	 * for endpoints that have a large bMaxBurst value.
	 */
	tx_fifo_resize_max_num = 6;

	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);
	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);

	dwc->sysdev_is_parent = device_property_read_bool(dev,
				"linux,sysdev_is_parent");
	if (dwc->sysdev_is_parent)
		dwc->sysdev = dwc->dev->parent;
	else
		dwc->sysdev = dwc->dev;

	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
	if (ret >= 0) {
		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
		if (!dwc->usb_psy)
			dev_err(dev, "couldn't get usb power supply\n");
	}

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
				"snps,dis-start-transfer-quirk");
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->usb2_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-lpm-disable");
	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-gadget-lpm-disable");
	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
				&rx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,rx-max-burst-prd",
				&rx_max_burst_prd);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
				&tx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,tx-max-burst-prd",
				&tx_max_burst_prd);
	dwc->do_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");
	if (dwc->do_fifo_resize)
		device_property_read_u8(dev, "tx-fifo-max-num",
					&tx_fifo_resize_max_num);

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");
	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u1-entry-quirk");
	dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u2-entry-quirk");
	dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
				"snps,dis_rxdet_inp3_quirk");
	dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
				"snps,dis-u2-freeclk-exists-quirk");
	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
"snps,dis-del-phy-power-chg-quirk"); 1489 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, 1490 "snps,dis-tx-ipgap-linecheck-quirk"); 1491 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, 1492 "snps,parkmode-disable-ss-quirk"); 1493 1494 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, 1495 "snps,tx_de_emphasis_quirk"); 1496 device_property_read_u8(dev, "snps,tx_de_emphasis", 1497 &tx_de_emphasis); 1498 device_property_read_string(dev, "snps,hsphy_interface", 1499 &dwc->hsphy_interface); 1500 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment", 1501 &dwc->fladj); 1502 device_property_read_u32(dev, "snps,ref-clock-period-ns", 1503 &dwc->ref_clk_per); 1504 1505 dwc->dis_metastability_quirk = device_property_read_bool(dev, 1506 "snps,dis_metastability_quirk"); 1507 1508 dwc->dis_split_quirk = device_property_read_bool(dev, 1509 "snps,dis-split-quirk"); 1510 1511 dwc->lpm_nyet_threshold = lpm_nyet_threshold; 1512 dwc->tx_de_emphasis = tx_de_emphasis; 1513 1514 dwc->hird_threshold = hird_threshold; 1515 1516 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd; 1517 dwc->rx_max_burst_prd = rx_max_burst_prd; 1518 1519 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; 1520 dwc->tx_max_burst_prd = tx_max_burst_prd; 1521 1522 dwc->imod_interval = 0; 1523 1524 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; 1525 } 1526 1527 /* check whether the core supports IMOD */ 1528 bool dwc3_has_imod(struct dwc3 *dwc) 1529 { 1530 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) || 1531 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) || 1532 DWC3_IP_IS(DWC32); 1533 } 1534 1535 static void dwc3_check_params(struct dwc3 *dwc) 1536 { 1537 struct device *dev = dwc->dev; 1538 unsigned int hwparam_gen = 1539 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); 1540 1541 /* Check for proper value of imod_interval */ 1542 if (dwc->imod_interval && !dwc3_has_imod(dwc)) { 1543 dev_warn(dwc->dev, "Interrupt moderation not supported\n"); 1544 dwc->imod_interval = 0; 1545 } 1546 1547 /* 1548 * Workaround for STAR 9000961433 which affects only version 1549 * 3.00a of the DWC_usb3 core. This prevents the controller 1550 * interrupt from being masked while handling events. IMOD 1551 * allows us to work around this issue. Enable it for the 1552 * affected version. 
	 */
	if (!dwc->imod_interval &&
	    DWC3_VER_IS(DWC3, 300A))
		dwc->imod_interval = 1;

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_SUPER:
		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
			dev_warn(dev, "UDC doesn't support Gen 1\n");
		break;
	case USB_SPEED_SUPER_PLUS:
		if ((DWC3_IP_IS(DWC32) &&
		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
		    (!DWC3_IP_IS(DWC32) &&
		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dev_warn(dev, "UDC doesn't support SSP\n");
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		switch (hwparam_gen) {
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
			if (DWC3_IP_IS(DWC32))
				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			else
				dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
			dwc->maximum_speed = USB_SPEED_HIGH;
			break;
		default:
			dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		}
		break;
	}

	/*
	 * Currently the controller does not have visibility into the HW
	 * parameter to determine the maximum number of lanes the HW supports.
	 * If the number of lanes is not specified in the device property, then
	 * set the default to support dual-lane for DWC_usb32 and single-lane
	 * for DWC_usb31 for super-speed-plus.
	 */
	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
		switch (dwc->max_ssp_rate) {
		case USB_SSP_GEN_2x1:
			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
				dev_warn(dev, "UDC only supports Gen 1\n");
			break;
		case USB_SSP_GEN_1x2:
		case USB_SSP_GEN_2x2:
			if (DWC3_IP_IS(DWC31))
				dev_warn(dev, "UDC only supports single lane\n");
			break;
		case USB_SSP_GEN_UNKNOWN:
		default:
			switch (hwparam_gen) {
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
				else
					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
				break;
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
				break;
			}
			break;
		}
	}
}

static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res, dwc_res;
	struct dwc3 *dwc;

	int ret;

	void __iomem *regs;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	if (!dwc->sysdev_is_parent) {
		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
		if (ret)
			return ret;
	}

	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
	if (IS_ERR(dwc->reset))
		return PTR_ERR(dwc->reset);

	if (dev->of_node) {
		/*
		 * Clocks are optional, but new DT platforms should support all
		 * clocks as required by the DT-binding.
		 * Some devices have different clock names in legacy device trees,
		 * check for them to retain backwards compatibility.
		 */
		dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
		if (IS_ERR(dwc->bus_clk))
			return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
					     "could not get bus clock\n");

		if (dwc->bus_clk == NULL) {
			dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
			if (IS_ERR(dwc->bus_clk))
				return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
						     "could not get bus clock\n");
		}

		dwc->ref_clk = devm_clk_get_optional(dev, "ref");
		if (IS_ERR(dwc->ref_clk))
			return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
					     "could not get ref clock\n");

		if (dwc->ref_clk == NULL) {
			dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
			if (IS_ERR(dwc->ref_clk))
				return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
						     "could not get ref clock\n");
		}

		dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
		if (IS_ERR(dwc->susp_clk))
			return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
					     "could not get suspend clock\n");

		if (dwc->susp_clk == NULL) {
			dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
			if (IS_ERR(dwc->susp_clk))
				return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
						     "could not get suspend clock\n");
		}
	}

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto assert_reset;

	if (!dwc3_core_is_valid(dwc)) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto disable_clks;
	}

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	spin_lock_init(&dwc->lock);
	mutex_init(&dwc->mutex);

	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err1;

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err3;

	ret = dwc3_alloc_scratch_buffers(dwc);
	if (ret)
		goto err3;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err_probe(dev, ret, "failed to initialize core\n");
		goto err4;
	}

	dwc3_check_params(dwc);
	dwc3_debugfs_init(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	pm_runtime_put(dev);

	return 0;

err5:
	dwc3_debugfs_exit(dwc);
	dwc3_event_buffers_cleanup(dwc);

	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	phy_power_off(dwc->usb2_generic_phy);
	phy_power_off(dwc->usb3_generic_phy);

	dwc3_ulpi_exit(dwc);

err4:
	dwc3_free_scratch_buffers(dwc);

err3:
	dwc3_free_event_buffers(dwc);

err2:
	pm_runtime_allow(&pdev->dev);

err1:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

disable_clks:
	dwc3_clk_disable(dwc);
assert_reset:
	reset_control_assert(dwc->reset);

	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return ret;
}

static int dwc3_remove(struct platform_device *pdev)
{
	struct dwc3 *dwc = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	dwc3_core_exit_mode(dwc);
	dwc3_debugfs_exit(dwc);

	dwc3_core_exit(dwc);
	dwc3_ulpi_exit(dwc);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	dwc3_free_event_buffers(dwc);
	dwc3_free_scratch_buffers(dwc);

	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);

	return 0;
}

#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int ret;

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		return ret;

	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto assert_reset;

	ret = dwc3_core_init(dwc);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	dwc3_clk_disable(dwc);
assert_reset:
	reset_control_assert(dwc->reset);

	return ret;
}

static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
	unsigned long flags;
	u32 reg;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (pm_runtime_suspended(dwc->dev))
			break;
		spin_lock_irqsave(&dwc->lock, flags);
		dwc3_gadget_suspend(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		synchronize_irq(dwc->irq_gadget);
		dwc3_core_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg)) {
			dwc3_core_exit(dwc);
			break;
		}

		/* Let the controller suspend the HS PHY before the PHY driver suspends */
		if (dwc->dis_u2_susphy_quirk ||
		    dwc->dis_enblslpm_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
			reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
				DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

			/* Give some time for the USB2 PHY to suspend */
			usleep_range(5000, 6000);
		}

		phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
		phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* do nothing during runtime_suspend */
		if (PMSG_IS_AUTO(msg))
			break;

		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_gadget_suspend(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			synchronize_irq(dwc->irq_gadget);
		}

		dwc3_otg_exit(dwc);
		dwc3_core_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
	unsigned long flags;
	int ret;
	u32 reg;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		spin_lock_irqsave(&dwc->lock, flags);
		dwc3_gadget_resume(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		if (!PMSG_IS_AUTO(msg)) {
			ret = dwc3_core_init_for_resume(dwc);
			if (ret)
				return ret;
			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
			break;
		}
		/* Restore GUSB2PHYCFG bits that were modified in suspend */
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (dwc->dis_u2_susphy_quirk)
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

		if (dwc->dis_enblslpm_quirk)
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;

		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

		phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
		phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* nothing to do on runtime_resume */
		if (PMSG_IS_AUTO(msg))
			break;

		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;

		dwc3_set_prtcap(dwc, dwc->current_dr_role);

		dwc3_otg_init(dwc);
		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
			dwc3_otg_host_init(dwc);
		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_gadget_resume(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
		}

		break;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_runtime_checks(struct dwc3 *dwc)
{
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc->connected)
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static int dwc3_runtime_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	if (dwc3_runtime_checks(dwc))
		return -EBUSY;

	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
	if (ret)
		return ret;

	device_init_wakeup(dev, true);

	return 0;
}

static int dwc3_runtime_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	device_init_wakeup(dev, false);

	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
	if (ret)
		return ret;

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_gadget_process_pending_events(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);

	return 0;
}

static int dwc3_runtime_idle(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);

	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		if (dwc3_runtime_checks(dwc))
			return -EBUSY;
		break;
	case DWC3_GCTL_PRTCAP_HOST:
	default:
		/* do nothing */
		break;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int dwc3_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
	if (ret)
		return ret;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = dwc3_resume_common(dwc, PMSG_RESUME);
	if (ret)
		return ret;

	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}

static void dwc3_complete(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 reg;

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
	    dwc->dis_split_quirk) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
		reg |= DWC3_GUCTL3_SPLITDISABLE;
		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
	}
}
#else
#define dwc3_complete NULL
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dwc3_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
	.complete = dwc3_complete,
	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
			dwc3_runtime_idle)
};

#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
	{
		.compatible = "snps,dwc3"
	},
	{
		.compatible = "synopsys,dwc3"
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif

#ifdef CONFIG_ACPI

#define ACPI_ID_INTEL_BSW	"808622B7"

static const struct acpi_device_id dwc3_acpi_match[] = {
	{ ACPI_ID_INTEL_BSW, 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
#endif

static struct platform_driver dwc3_driver = {
	.probe = dwc3_probe,
	.remove = dwc3_remove,
	.driver = {
		.name = "dwc3",
		.of_match_table = of_match_ptr(of_dwc3_match),
		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
		.pm = &dwc3_dev_pm_ops,
	},
};

module_platform_driver(dwc3_driver);

MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");