1 /* 2 * core.c - DesignWare HS OTG Controller common routines 3 * 4 * Copyright (C) 2004-2013 Synopsys, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The names of the above-listed copyright holders may not be used 16 * to endorse or promote products derived from this software without 17 * specific prior written permission. 18 * 19 * ALTERNATIVELY, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") as published by the Free Software 21 * Foundation; either version 2 of the License, or (at your option) any 22 * later version. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: always 0.
 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	/* Per-channel interrupt masks, one per host channel */
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));

	hr->hprt0 = dwc2_read_hprt0(hsotg);
	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
	/* Mark the backup as usable by dwc2_restore_host_registers() */
	hr->valid = true;

	return 0;
}
/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming usb bus, host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -EINVAL if no valid backup exists.
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* Consume the backup so a stale copy is never restored twice */
	hr->valid = false;

	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
	dwc2_writel(hr->hfir, hsotg->regs + HFIR);
	hsotg->frame_number = 0;

	return 0;
}
#else
/* Host mode not compiled in: host backup/restore are no-ops */
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: always 0.
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));

		/*
		 * Ensure DATA PID is correctly configured: record the
		 * current PID in the backup as an explicit SETDxPID so the
		 * restore re-establishes it.
		 */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
	}
	dr->valid = true;
	return 0;
}
/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming usb bus, device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -EINVAL if no valid backup exists.
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* Consume the backup so a stale copy is never restored twice */
	dr->valid = false;

	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
/* Device mode not compiled in: device backup/restore are no-ops */
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif
/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: always 0.
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
	/* Periodic TX FIFO sizes, one slot per endpoint/channel */
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));

	/* Mark the backup as usable by dwc2_restore_global_registers() */
	gr->valid = true;
	return 0;
}
/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming usb bus, global registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -EINVAL if no valid backup exists.
 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = &hsotg->gr_backup;
	if (!gr->valid) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* Consume the backup so a stale copy is never restored twice */
	gr->valid = false;

	/* Clear all pending interrupts before unmasking any of them */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
	dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
	dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
	dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
	dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
	dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
	dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
	dwc2_writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
	dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		dwc2_writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));

	return 0;
}
/**
 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @restore: Controller registers need to be restored
 *
 * Return: 0 on success, -ENOTSUPP if hibernation is not enabled, or a
 * negative error code if a register restore fails.
 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/*
	 * Wake the core back up in steps: ungate the PHY clock, remove the
	 * power clamp, then release the module reset. Each step is a
	 * read-modify-write of PCGCTL.
	 */
	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	/* Give the hardware time to come out of power down */
	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}
		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}
/**
 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -ENOTSUPP if hibernation is not enabled, or a
 * negative error code if a register backup fails.
 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/*
	 * Clear any pending interrupts since dwc2 will not be able to
	 * clear them after entering hibernation.
	 */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Put the controller in low power state */
	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);

	/* Clamp power, reset the module, then stop the PHY clock */
	pcgcctl |= PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	return ret;
}

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	/* RX FIFO level interrupt is only needed in slave (non-DMA) mode */
	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	/* Connector ID interrupt not needed with an external ID pin control */
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = dwc2_readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	dwc2_writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Core Soft Reset */
	greset = dwc2_readl(hsotg->regs + GRSTCTL);
	greset |= GRSTCTL_CSFTRST;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);
	/* Poll until the core clears the self-clearing CSFTRST bit */
	do {
		udelay(1);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	/* Wait for AHB master IDLE state */
	count = 0;
	do {
		udelay(1);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	return 0;
}

/*
 * Force the mode of the controller.
 *
 * Forcing the mode is needed for two cases:
 *
 * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
 * controller to stay in a particular mode regardless of ID pin
 * changes. We do this usually after a core reset.
 *
 * 2) During probe we want to read reset values of the hw
 * configuration registers that are only available in either host or
 * device mode. We may need to force the mode if the current mode does
 * not allow us to access the register in the mode that we want.
 *
 * In either case it only makes sense to force the mode if the
 * controller hardware is OTG capable.
 *
 * Checks are done in this function to determine whether doing a force
 * would be valid or not.
 *
 * If a force is done, it requires a 25ms delay to take effect.
 *
 * Returns true if the mode was forced.
 */
static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
{
	u32 gusbcfg;
	u32 set;
	u32 clear;

	dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");

	/*
	 * Force mode has no effect if the hardware is not OTG.
	 */
	if (!dwc2_hw_is_otg(hsotg))
		return false;

	/*
	 * If dr_mode is either peripheral or host only, there is no
	 * need to ever force the mode to the opposite mode.
	 */
	if (WARN_ON(host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
		return false;

	if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST))
		return false;

	gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
	clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;

	/*
	 * If the force mode bit is already set, don't set it.
	 */
	if ((gusbcfg & set) && !(gusbcfg & clear))
		return false;

	gusbcfg &= ~clear;
	gusbcfg |= set;
	dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);

	/* Required delay for the force to take effect */
	msleep(25);
	return true;
}

/*
 * Clears the force mode bits.
 */
static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{
	u32 gusbcfg;

	gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
	gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
	dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	msleep(25);
}

/*
 * Sets or clears force mode based on the dr_mode parameter.
 */
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
{
	switch (hsotg->dr_mode) {
	case USB_DR_MODE_HOST:
		dwc2_force_mode(hsotg, true);
		break;
	case USB_DR_MODE_PERIPHERAL:
		dwc2_force_mode(hsotg, false);
		break;
	case USB_DR_MODE_OTG:
		/* OTG: let the ID pin decide, so clear any forced mode */
		dwc2_clear_force_mode(hsotg);
		break;
	default:
		dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
			 __func__, hsotg->dr_mode);
		break;
	}
}
/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 *
 * Additionally this will apply force mode as per the hsotg->dr_mode
 * parameter.
 */
int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
{
	int retval;

	retval = dwc2_core_reset(hsotg);
	if (retval)
		return retval;

	dwc2_force_dr_mode(hsotg);
	return 0;
}

/*
 * Initialize a dedicated full-speed PHY. Optionally selects the FS PHY in
 * GUSBCFG (with the required core reset) and enables the FS I2C interface
 * if configured.
 */
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");

		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		if (!(usbcfg & GUSBCFG_PHYSEL)) {
			usbcfg |= GUSBCFG_PHYSEL;
			dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

			/* Reset after a PHY select */
			retval = dwc2_core_reset_and_force_dr_mode(hsotg);

			if (retval) {
				dev_err(hsotg->dev,
					"%s: Reset failed, aborting", __func__);
				return retval;
			}
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Program GI2CCTL.I2CEn: set device address, then enable */
		i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}

/*
 * Initialize a high-speed PHY (ULPI or UTMI+). Only programs the interface
 * when select_phy is true; a core reset follows any GUSBCFG change.
 */
static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, usbcfg_old;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	if (usbcfg != usbcfg_old) {
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after setting the PHY parameters */
		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
		if (retval) {
			dev_err(hsotg->dev,
				"%s: Reset failed, aborting", __func__);
			return retval;
		}
	}

	return retval;
}

/*
 * Select and initialize the PHY according to the speed and phy_type
 * parameters, then configure the ULPI FS/LS serial interface bits.
 */
static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	} else {
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	}

	return retval;
}

/*
 * Program GAHBCFG: select DMA vs slave mode according to the hardware
 * architecture and the dma_enable/ahbcfg core parameters.
 */
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		/* Keep control bits, take burst config from the parameter */
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		/* Descriptor DMA requires DMA; force it off in slave mode */
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);

	return 0;
}

/*
 * Program the GUSBCFG HNP/SRP capability bits from the otg_cap parameter,
 * constrained by the hardware operating mode.
 */
static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
}

/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @initial_setup: If true then this is the first init for this instance.
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	/*
	 * Reset the Controller
	 *
	 * We only need to reset the controller if this is a re-init.
	 * For the first init we know for sure that earlier code reset us (it
	 * needed to in order to properly detect various parameters).
	 */
	if (!initial_setup) {
		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
				__func__);
			return retval;
		}
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, initial_setup);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}

/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	dwc2_writel(0, hsotg->regs + GINTMSK);
	dwc2_writel(0, hsotg->regs + HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
 * For system that have a total fifo depth that is smaller than the default
 * RX + TX fifo size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channel.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}

/*
 * Program the host-mode FIFO layout: RX FIFO, then the non-periodic and
 * periodic TX FIFOs stacked after it, and (on cores <= 2.94a with multiple
 * TX FIFOs) the GDFIFOCFG endpoint-info base.
 */
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO: starts right after the RX FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO: starts after the non-periodic TX FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}
1137 */ 1138 void dwc2_core_host_init(struct dwc2_hsotg *hsotg) 1139 { 1140 u32 hcfg, hfir, otgctl; 1141 1142 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 1143 1144 /* Restart the Phy Clock */ 1145 dwc2_writel(0, hsotg->regs + PCGCTL); 1146 1147 /* Initialize Host Configuration Register */ 1148 dwc2_init_fs_ls_pclk_sel(hsotg); 1149 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { 1150 hcfg = dwc2_readl(hsotg->regs + HCFG); 1151 hcfg |= HCFG_FSLSSUPP; 1152 dwc2_writel(hcfg, hsotg->regs + HCFG); 1153 } 1154 1155 /* 1156 * This bit allows dynamic reloading of the HFIR register during 1157 * runtime. This bit needs to be programmed during initial configuration 1158 * and its value must not be changed during runtime. 1159 */ 1160 if (hsotg->core_params->reload_ctl > 0) { 1161 hfir = dwc2_readl(hsotg->regs + HFIR); 1162 hfir |= HFIR_RLDCTRL; 1163 dwc2_writel(hfir, hsotg->regs + HFIR); 1164 } 1165 1166 if (hsotg->core_params->dma_desc_enable > 0) { 1167 u32 op_mode = hsotg->hw_params.op_mode; 1168 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || 1169 !hsotg->hw_params.dma_desc_enable || 1170 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || 1171 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || 1172 op_mode == GHWCFG2_OP_MODE_UNDEFINED) { 1173 dev_err(hsotg->dev, 1174 "Hardware does not support descriptor DMA mode -\n"); 1175 dev_err(hsotg->dev, 1176 "falling back to buffer DMA mode.\n"); 1177 hsotg->core_params->dma_desc_enable = 0; 1178 } else { 1179 hcfg = dwc2_readl(hsotg->regs + HCFG); 1180 hcfg |= HCFG_DESCDMA; 1181 dwc2_writel(hcfg, hsotg->regs + HCFG); 1182 } 1183 } 1184 1185 /* Configure data FIFO sizes */ 1186 dwc2_config_fifos(hsotg); 1187 1188 /* TODO - check this */ 1189 /* Clear Host Set HNP Enable in the OTG Control Register */ 1190 otgctl = dwc2_readl(hsotg->regs + GOTGCTL); 1191 otgctl &= ~GOTGCTL_HSTSETHNPEN; 1192 dwc2_writel(otgctl, hsotg->regs + GOTGCTL); 1193 1194 /* Make sure the FIFOs are flushed */ 1195 dwc2_flush_tx_fifo(hsotg, 
0x10 /* all TX FIFOs */); 1196 dwc2_flush_rx_fifo(hsotg); 1197 1198 /* Clear Host Set HNP Enable in the OTG Control Register */ 1199 otgctl = dwc2_readl(hsotg->regs + GOTGCTL); 1200 otgctl &= ~GOTGCTL_HSTSETHNPEN; 1201 dwc2_writel(otgctl, hsotg->regs + GOTGCTL); 1202 1203 if (hsotg->core_params->dma_desc_enable <= 0) { 1204 int num_channels, i; 1205 u32 hcchar; 1206 1207 /* Flush out any leftover queued requests */ 1208 num_channels = hsotg->core_params->host_channels; 1209 for (i = 0; i < num_channels; i++) { 1210 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); 1211 hcchar &= ~HCCHAR_CHENA; 1212 hcchar |= HCCHAR_CHDIS; 1213 hcchar &= ~HCCHAR_EPDIR; 1214 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); 1215 } 1216 1217 /* Halt all channels to put them into a known state */ 1218 for (i = 0; i < num_channels; i++) { 1219 int count = 0; 1220 1221 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); 1222 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; 1223 hcchar &= ~HCCHAR_EPDIR; 1224 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); 1225 dev_dbg(hsotg->dev, "%s: Halt channel %d\n", 1226 __func__, i); 1227 do { 1228 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); 1229 if (++count > 1000) { 1230 dev_err(hsotg->dev, 1231 "Unable to clear enable on channel %d\n", 1232 i); 1233 break; 1234 } 1235 udelay(1); 1236 } while (hcchar & HCCHAR_CHENA); 1237 } 1238 } 1239 1240 /* Turn on the vbus power */ 1241 dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); 1242 if (hsotg->op_state == OTG_STATE_A_HOST) { 1243 u32 hprt0 = dwc2_read_hprt0(hsotg); 1244 1245 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", 1246 !!(hprt0 & HPRT0_PWR)); 1247 if (!(hprt0 & HPRT0_PWR)) { 1248 hprt0 |= HPRT0_PWR; 1249 dwc2_writel(hprt0, hsotg->regs + HPRT0); 1250 } 1251 } 1252 1253 dwc2_enable_host_interrupts(hsotg); 1254 } 1255 1256 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, 1257 struct dwc2_host_chan *chan) 1258 { 1259 u32 hcintmsk = HCINTMSK_CHHLTD; 1260 1261 switch (chan->ep_type) { 1262 case USB_ENDPOINT_XFER_CONTROL: 1263 case USB_ENDPOINT_XFER_BULK: 1264 dev_vdbg(hsotg->dev, "control/bulk\n"); 1265 hcintmsk |= HCINTMSK_XFERCOMPL; 1266 hcintmsk |= HCINTMSK_STALL; 1267 hcintmsk |= HCINTMSK_XACTERR; 1268 hcintmsk |= HCINTMSK_DATATGLERR; 1269 if (chan->ep_is_in) { 1270 hcintmsk |= HCINTMSK_BBLERR; 1271 } else { 1272 hcintmsk |= HCINTMSK_NAK; 1273 hcintmsk |= HCINTMSK_NYET; 1274 if (chan->do_ping) 1275 hcintmsk |= HCINTMSK_ACK; 1276 } 1277 1278 if (chan->do_split) { 1279 hcintmsk |= HCINTMSK_NAK; 1280 if (chan->complete_split) 1281 hcintmsk |= HCINTMSK_NYET; 1282 else 1283 hcintmsk |= HCINTMSK_ACK; 1284 } 1285 1286 if (chan->error_state) 1287 hcintmsk |= HCINTMSK_ACK; 1288 break; 1289 1290 case USB_ENDPOINT_XFER_INT: 1291 if (dbg_perio()) 1292 dev_vdbg(hsotg->dev, "intr\n"); 1293 hcintmsk |= HCINTMSK_XFERCOMPL; 1294 hcintmsk |= HCINTMSK_NAK; 1295 hcintmsk |= HCINTMSK_STALL; 1296 hcintmsk |= HCINTMSK_XACTERR; 1297 hcintmsk |= HCINTMSK_DATATGLERR; 1298 hcintmsk |= HCINTMSK_FRMOVRUN; 1299 1300 if (chan->ep_is_in) 1301 hcintmsk |= HCINTMSK_BBLERR; 1302 if (chan->error_state) 1303 hcintmsk |= HCINTMSK_ACK; 1304 if (chan->do_split) { 1305 if (chan->complete_split) 1306 hcintmsk |= HCINTMSK_NYET; 1307 else 1308 hcintmsk |= HCINTMSK_ACK; 1309 } 1310 break; 1311 1312 case USB_ENDPOINT_XFER_ISOC: 1313 if (dbg_perio()) 1314 dev_vdbg(hsotg->dev, "isoc\n"); 1315 hcintmsk |= HCINTMSK_XFERCOMPL; 1316 
hcintmsk |= HCINTMSK_FRMOVRUN; 1317 hcintmsk |= HCINTMSK_ACK; 1318 1319 if (chan->ep_is_in) { 1320 hcintmsk |= HCINTMSK_XACTERR; 1321 hcintmsk |= HCINTMSK_BBLERR; 1322 } 1323 break; 1324 default: 1325 dev_err(hsotg->dev, "## Unknown EP type ##\n"); 1326 break; 1327 } 1328 1329 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); 1330 if (dbg_hc(chan)) 1331 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); 1332 } 1333 1334 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, 1335 struct dwc2_host_chan *chan) 1336 { 1337 u32 hcintmsk = HCINTMSK_CHHLTD; 1338 1339 /* 1340 * For Descriptor DMA mode core halts the channel on AHB error. 1341 * Interrupt is not required. 1342 */ 1343 if (hsotg->core_params->dma_desc_enable <= 0) { 1344 if (dbg_hc(chan)) 1345 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1346 hcintmsk |= HCINTMSK_AHBERR; 1347 } else { 1348 if (dbg_hc(chan)) 1349 dev_vdbg(hsotg->dev, "desc DMA enabled\n"); 1350 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1351 hcintmsk |= HCINTMSK_XFERCOMPL; 1352 } 1353 1354 if (chan->error_state && !chan->do_split && 1355 chan->ep_type != USB_ENDPOINT_XFER_ISOC) { 1356 if (dbg_hc(chan)) 1357 dev_vdbg(hsotg->dev, "setting ACK\n"); 1358 hcintmsk |= HCINTMSK_ACK; 1359 if (chan->ep_is_in) { 1360 hcintmsk |= HCINTMSK_DATATGLERR; 1361 if (chan->ep_type != USB_ENDPOINT_XFER_INT) 1362 hcintmsk |= HCINTMSK_NAK; 1363 } 1364 } 1365 1366 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); 1367 if (dbg_hc(chan)) 1368 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); 1369 } 1370 1371 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, 1372 struct dwc2_host_chan *chan) 1373 { 1374 u32 intmsk; 1375 1376 if (hsotg->core_params->dma_enable > 0) { 1377 if (dbg_hc(chan)) 1378 dev_vdbg(hsotg->dev, "DMA enabled\n"); 1379 dwc2_hc_enable_dma_ints(hsotg, chan); 1380 } else { 1381 if (dbg_hc(chan)) 1382 dev_vdbg(hsotg->dev, "DMA disabled\n"); 1383 dwc2_hc_enable_slave_ints(hsotg, chan); 1384 } 1385 1386 
/* Enable the top level host channel interrupt */ 1387 intmsk = dwc2_readl(hsotg->regs + HAINTMSK); 1388 intmsk |= 1 << chan->hc_num; 1389 dwc2_writel(intmsk, hsotg->regs + HAINTMSK); 1390 if (dbg_hc(chan)) 1391 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); 1392 1393 /* Make sure host channel interrupts are enabled */ 1394 intmsk = dwc2_readl(hsotg->regs + GINTMSK); 1395 intmsk |= GINTSTS_HCHINT; 1396 dwc2_writel(intmsk, hsotg->regs + GINTMSK); 1397 if (dbg_hc(chan)) 1398 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); 1399 } 1400 1401 /** 1402 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from 1403 * a specific endpoint 1404 * 1405 * @hsotg: Programming view of DWC_otg controller 1406 * @chan: Information needed to initialize the host channel 1407 * 1408 * The HCCHARn register is set up with the characteristics specified in chan. 1409 * Host channel interrupts that may need to be serviced while this transfer is 1410 * in progress are enabled. 1411 */ 1412 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 1413 { 1414 u8 hc_num = chan->hc_num; 1415 u32 hcintmsk; 1416 u32 hcchar; 1417 u32 hcsplt = 0; 1418 1419 if (dbg_hc(chan)) 1420 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1421 1422 /* Clear old interrupt conditions for this host channel */ 1423 hcintmsk = 0xffffffff; 1424 hcintmsk &= ~HCINTMSK_RESERVED14_31; 1425 dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num)); 1426 1427 /* Enable channel interrupts required for this transfer */ 1428 dwc2_hc_enable_ints(hsotg, chan); 1429 1430 /* 1431 * Program the HCCHARn register with the endpoint characteristics for 1432 * the current transfer 1433 */ 1434 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; 1435 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; 1436 if (chan->ep_is_in) 1437 hcchar |= HCCHAR_EPDIR; 1438 if (chan->speed == USB_SPEED_LOW) 1439 hcchar |= HCCHAR_LSPDDEV; 1440 hcchar |= chan->ep_type << 
HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; 1441 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; 1442 dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num)); 1443 if (dbg_hc(chan)) { 1444 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", 1445 hc_num, hcchar); 1446 1447 dev_vdbg(hsotg->dev, "%s: Channel %d\n", 1448 __func__, hc_num); 1449 dev_vdbg(hsotg->dev, " Dev Addr: %d\n", 1450 chan->dev_addr); 1451 dev_vdbg(hsotg->dev, " Ep Num: %d\n", 1452 chan->ep_num); 1453 dev_vdbg(hsotg->dev, " Is In: %d\n", 1454 chan->ep_is_in); 1455 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", 1456 chan->speed == USB_SPEED_LOW); 1457 dev_vdbg(hsotg->dev, " Ep Type: %d\n", 1458 chan->ep_type); 1459 dev_vdbg(hsotg->dev, " Max Pkt: %d\n", 1460 chan->max_packet); 1461 } 1462 1463 /* Program the HCSPLT register for SPLITs */ 1464 if (chan->do_split) { 1465 if (dbg_hc(chan)) 1466 dev_vdbg(hsotg->dev, 1467 "Programming HC %d with split --> %s\n", 1468 hc_num, 1469 chan->complete_split ? "CSPLIT" : "SSPLIT"); 1470 if (chan->complete_split) 1471 hcsplt |= HCSPLT_COMPSPLT; 1472 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & 1473 HCSPLT_XACTPOS_MASK; 1474 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & 1475 HCSPLT_HUBADDR_MASK; 1476 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & 1477 HCSPLT_PRTADDR_MASK; 1478 if (dbg_hc(chan)) { 1479 dev_vdbg(hsotg->dev, " comp split %d\n", 1480 chan->complete_split); 1481 dev_vdbg(hsotg->dev, " xact pos %d\n", 1482 chan->xact_pos); 1483 dev_vdbg(hsotg->dev, " hub addr %d\n", 1484 chan->hub_addr); 1485 dev_vdbg(hsotg->dev, " hub port %d\n", 1486 chan->hub_port); 1487 dev_vdbg(hsotg->dev, " is_in %d\n", 1488 chan->ep_is_in); 1489 dev_vdbg(hsotg->dev, " Max Pkt %d\n", 1490 chan->max_packet); 1491 dev_vdbg(hsotg->dev, " xferlen %d\n", 1492 chan->xfer_len); 1493 } 1494 } 1495 1496 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num)); 1497 } 1498 1499 /** 1500 * dwc2_hc_halt() - Attempts to halt a host channel 1501 * 1502 * @hsotg: Controller register 
interface 1503 * @chan: Host channel to halt 1504 * @halt_status: Reason for halting the channel 1505 * 1506 * This function should only be called in Slave mode or to abort a transfer in 1507 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the 1508 * controller halts the channel when the transfer is complete or a condition 1509 * occurs that requires application intervention. 1510 * 1511 * In slave mode, checks for a free request queue entry, then sets the Channel 1512 * Enable and Channel Disable bits of the Host Channel Characteristics 1513 * register of the specified channel to intiate the halt. If there is no free 1514 * request queue entry, sets only the Channel Disable bit of the HCCHARn 1515 * register to flush requests for this channel. In the latter case, sets a 1516 * flag to indicate that the host channel needs to be halted when a request 1517 * queue slot is open. 1518 * 1519 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the 1520 * HCCHARn register. The controller ensures there is space in the request 1521 * queue before submitting the halt request. 1522 * 1523 * Some time may elapse before the core flushes any posted requests for this 1524 * host channel and halts. The Channel Halted interrupt handler completes the 1525 * deactivation of the host channel. 1526 */ 1527 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, 1528 enum dwc2_halt_status halt_status) 1529 { 1530 u32 nptxsts, hptxsts, hcchar; 1531 1532 if (dbg_hc(chan)) 1533 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1534 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) 1535 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); 1536 1537 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || 1538 halt_status == DWC2_HC_XFER_AHB_ERR) { 1539 /* 1540 * Disable all channel interrupts except Ch Halted. 
The QTD 1541 * and QH state associated with this transfer has been cleared 1542 * (in the case of URB_DEQUEUE), so the channel needs to be 1543 * shut down carefully to prevent crashes. 1544 */ 1545 u32 hcintmsk = HCINTMSK_CHHLTD; 1546 1547 dev_vdbg(hsotg->dev, "dequeue/error\n"); 1548 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); 1549 1550 /* 1551 * Make sure no other interrupts besides halt are currently 1552 * pending. Handling another interrupt could cause a crash due 1553 * to the QTD and QH state. 1554 */ 1555 dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num)); 1556 1557 /* 1558 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR 1559 * even if the channel was already halted for some other 1560 * reason 1561 */ 1562 chan->halt_status = halt_status; 1563 1564 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); 1565 if (!(hcchar & HCCHAR_CHENA)) { 1566 /* 1567 * The channel is either already halted or it hasn't 1568 * started yet. In DMA mode, the transfer may halt if 1569 * it finishes normally or a condition occurs that 1570 * requires driver intervention. Don't want to halt 1571 * the channel again. In either Slave or DMA mode, 1572 * it's possible that the transfer has been assigned 1573 * to a channel, but not started yet when an URB is 1574 * dequeued. Don't want to halt a channel that hasn't 1575 * started yet. 1576 */ 1577 return; 1578 } 1579 } 1580 if (chan->halt_pending) { 1581 /* 1582 * A halt has already been issued for this channel. This might 1583 * happen when a transfer is aborted by a higher level in 1584 * the stack. 
1585 */ 1586 dev_vdbg(hsotg->dev, 1587 "*** %s: Channel %d, chan->halt_pending already set ***\n", 1588 __func__, chan->hc_num); 1589 return; 1590 } 1591 1592 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); 1593 1594 /* No need to set the bit in DDMA for disabling the channel */ 1595 /* TODO check it everywhere channel is disabled */ 1596 if (hsotg->core_params->dma_desc_enable <= 0) { 1597 if (dbg_hc(chan)) 1598 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1599 hcchar |= HCCHAR_CHENA; 1600 } else { 1601 if (dbg_hc(chan)) 1602 dev_dbg(hsotg->dev, "desc DMA enabled\n"); 1603 } 1604 hcchar |= HCCHAR_CHDIS; 1605 1606 if (hsotg->core_params->dma_enable <= 0) { 1607 if (dbg_hc(chan)) 1608 dev_vdbg(hsotg->dev, "DMA not enabled\n"); 1609 hcchar |= HCCHAR_CHENA; 1610 1611 /* Check for space in the request queue to issue the halt */ 1612 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || 1613 chan->ep_type == USB_ENDPOINT_XFER_BULK) { 1614 dev_vdbg(hsotg->dev, "control/bulk\n"); 1615 nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS); 1616 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { 1617 dev_vdbg(hsotg->dev, "Disabling channel\n"); 1618 hcchar &= ~HCCHAR_CHENA; 1619 } 1620 } else { 1621 if (dbg_perio()) 1622 dev_vdbg(hsotg->dev, "isoc/intr\n"); 1623 hptxsts = dwc2_readl(hsotg->regs + HPTXSTS); 1624 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || 1625 hsotg->queuing_high_bandwidth) { 1626 if (dbg_perio()) 1627 dev_vdbg(hsotg->dev, "Disabling channel\n"); 1628 hcchar &= ~HCCHAR_CHENA; 1629 } 1630 } 1631 } else { 1632 if (dbg_hc(chan)) 1633 dev_vdbg(hsotg->dev, "DMA enabled\n"); 1634 } 1635 1636 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 1637 chan->halt_status = halt_status; 1638 1639 if (hcchar & HCCHAR_CHENA) { 1640 if (dbg_hc(chan)) 1641 dev_vdbg(hsotg->dev, "Channel enabled\n"); 1642 chan->halt_pending = 1; 1643 chan->halt_on_queue = 0; 1644 } else { 1645 if (dbg_hc(chan)) 1646 dev_vdbg(hsotg->dev, "Channel disabled\n"); 1647 chan->halt_on_queue = 1; 1648 } 
1649 1650 if (dbg_hc(chan)) { 1651 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1652 chan->hc_num); 1653 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", 1654 hcchar); 1655 dev_vdbg(hsotg->dev, " halt_pending: %d\n", 1656 chan->halt_pending); 1657 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", 1658 chan->halt_on_queue); 1659 dev_vdbg(hsotg->dev, " halt_status: %d\n", 1660 chan->halt_status); 1661 } 1662 } 1663 1664 /** 1665 * dwc2_hc_cleanup() - Clears the transfer state for a host channel 1666 * 1667 * @hsotg: Programming view of DWC_otg controller 1668 * @chan: Identifies the host channel to clean up 1669 * 1670 * This function is normally called after a transfer is done and the host 1671 * channel is being released 1672 */ 1673 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 1674 { 1675 u32 hcintmsk; 1676 1677 chan->xfer_started = 0; 1678 1679 /* 1680 * Clear channel interrupt enables and any unhandled channel interrupt 1681 * conditions 1682 */ 1683 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num)); 1684 hcintmsk = 0xffffffff; 1685 hcintmsk &= ~HCINTMSK_RESERVED14_31; 1686 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num)); 1687 } 1688 1689 /** 1690 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in 1691 * which frame a periodic transfer should occur 1692 * 1693 * @hsotg: Programming view of DWC_otg controller 1694 * @chan: Identifies the host channel to set up and its properties 1695 * @hcchar: Current value of the HCCHAR register for the specified host channel 1696 * 1697 * This function has no effect on non-periodic transfers 1698 */ 1699 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, 1700 struct dwc2_host_chan *chan, u32 *hcchar) 1701 { 1702 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1703 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1704 /* 1 if _next_ frame is odd, 0 if it's even */ 1705 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1)) 1706 *hcchar |= HCCHAR_ODDFRM; 1707 } 1708 } 
1709 1710 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) 1711 { 1712 /* Set up the initial PID for the transfer */ 1713 if (chan->speed == USB_SPEED_HIGH) { 1714 if (chan->ep_is_in) { 1715 if (chan->multi_count == 1) 1716 chan->data_pid_start = DWC2_HC_PID_DATA0; 1717 else if (chan->multi_count == 2) 1718 chan->data_pid_start = DWC2_HC_PID_DATA1; 1719 else 1720 chan->data_pid_start = DWC2_HC_PID_DATA2; 1721 } else { 1722 if (chan->multi_count == 1) 1723 chan->data_pid_start = DWC2_HC_PID_DATA0; 1724 else 1725 chan->data_pid_start = DWC2_HC_PID_MDATA; 1726 } 1727 } else { 1728 chan->data_pid_start = DWC2_HC_PID_DATA0; 1729 } 1730 } 1731 1732 /** 1733 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with 1734 * the Host Channel 1735 * 1736 * @hsotg: Programming view of DWC_otg controller 1737 * @chan: Information needed to initialize the host channel 1738 * 1739 * This function should only be called in Slave mode. For a channel associated 1740 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel 1741 * associated with a periodic EP, the periodic Tx FIFO is written. 1742 * 1743 * Upon return the xfer_buf and xfer_count fields in chan are incremented by 1744 * the number of bytes written to the Tx FIFO. 
1745 */ 1746 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, 1747 struct dwc2_host_chan *chan) 1748 { 1749 u32 i; 1750 u32 remaining_count; 1751 u32 byte_count; 1752 u32 dword_count; 1753 u32 __iomem *data_fifo; 1754 u32 *data_buf = (u32 *)chan->xfer_buf; 1755 1756 if (dbg_hc(chan)) 1757 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1758 1759 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); 1760 1761 remaining_count = chan->xfer_len - chan->xfer_count; 1762 if (remaining_count > chan->max_packet) 1763 byte_count = chan->max_packet; 1764 else 1765 byte_count = remaining_count; 1766 1767 dword_count = (byte_count + 3) / 4; 1768 1769 if (((unsigned long)data_buf & 0x3) == 0) { 1770 /* xfer_buf is DWORD aligned */ 1771 for (i = 0; i < dword_count; i++, data_buf++) 1772 dwc2_writel(*data_buf, data_fifo); 1773 } else { 1774 /* xfer_buf is not DWORD aligned */ 1775 for (i = 0; i < dword_count; i++, data_buf++) { 1776 u32 data = data_buf[0] | data_buf[1] << 8 | 1777 data_buf[2] << 16 | data_buf[3] << 24; 1778 dwc2_writel(data, data_fifo); 1779 } 1780 } 1781 1782 chan->xfer_count += byte_count; 1783 chan->xfer_buf += byte_count; 1784 } 1785 1786 /** 1787 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host 1788 * channel and starts the transfer 1789 * 1790 * @hsotg: Programming view of DWC_otg controller 1791 * @chan: Information needed to initialize the host channel. The xfer_len value 1792 * may be reduced to accommodate the max widths of the XferSize and 1793 * PktCnt fields in the HCTSIZn register. The multi_count value may be 1794 * changed to reflect the final xfer_len value. 1795 * 1796 * This function may be called in either Slave mode or DMA mode. In Slave mode, 1797 * the caller must ensure that there is sufficient space in the request queue 1798 * and Tx Data FIFO. 1799 * 1800 * For an OUT transfer in Slave mode, it loads a data packet into the 1801 * appropriate FIFO. 
If necessary, additional data packets are loaded in the 1802 * Host ISR. 1803 * 1804 * For an IN transfer in Slave mode, a data packet is requested. The data 1805 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, 1806 * additional data packets are requested in the Host ISR. 1807 * 1808 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ 1809 * register along with a packet count of 1 and the channel is enabled. This 1810 * causes a single PING transaction to occur. Other fields in HCTSIZ are 1811 * simply set to 0 since no data transfer occurs in this case. 1812 * 1813 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with 1814 * all the information required to perform the subsequent data transfer. In 1815 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the 1816 * controller performs the entire PING protocol, then starts the data 1817 * transfer. 1818 */ 1819 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, 1820 struct dwc2_host_chan *chan) 1821 { 1822 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; 1823 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; 1824 u32 hcchar; 1825 u32 hctsiz = 0; 1826 u16 num_packets; 1827 u32 ec_mc; 1828 1829 if (dbg_hc(chan)) 1830 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1831 1832 if (chan->do_ping) { 1833 if (hsotg->core_params->dma_enable <= 0) { 1834 if (dbg_hc(chan)) 1835 dev_vdbg(hsotg->dev, "ping, no DMA\n"); 1836 dwc2_hc_do_ping(hsotg, chan); 1837 chan->xfer_started = 1; 1838 return; 1839 } else { 1840 if (dbg_hc(chan)) 1841 dev_vdbg(hsotg->dev, "ping, DMA\n"); 1842 hctsiz |= TSIZ_DOPNG; 1843 } 1844 } 1845 1846 if (chan->do_split) { 1847 if (dbg_hc(chan)) 1848 dev_vdbg(hsotg->dev, "split\n"); 1849 num_packets = 1; 1850 1851 if (chan->complete_split && !chan->ep_is_in) 1852 /* 1853 * For CSPLIT OUT Transfer, set the size to 0 so the 1854 * core doesn't expect any data written to the FIFO 1855 */ 1856 
chan->xfer_len = 0; 1857 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) 1858 chan->xfer_len = chan->max_packet; 1859 else if (!chan->ep_is_in && chan->xfer_len > 188) 1860 chan->xfer_len = 188; 1861 1862 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & 1863 TSIZ_XFERSIZE_MASK; 1864 1865 /* For split set ec_mc for immediate retries */ 1866 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1867 chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1868 ec_mc = 3; 1869 else 1870 ec_mc = 1; 1871 } else { 1872 if (dbg_hc(chan)) 1873 dev_vdbg(hsotg->dev, "no split\n"); 1874 /* 1875 * Ensure that the transfer length and packet count will fit 1876 * in the widths allocated for them in the HCTSIZn register 1877 */ 1878 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1879 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1880 /* 1881 * Make sure the transfer size is no larger than one 1882 * (micro)frame's worth of data. (A check was done 1883 * when the periodic transfer was accepted to ensure 1884 * that a (micro)frame's worth of data can be 1885 * programmed into a channel.) 
1886 */ 1887 u32 max_periodic_len = 1888 chan->multi_count * chan->max_packet; 1889 1890 if (chan->xfer_len > max_periodic_len) 1891 chan->xfer_len = max_periodic_len; 1892 } else if (chan->xfer_len > max_hc_xfer_size) { 1893 /* 1894 * Make sure that xfer_len is a multiple of max packet 1895 * size 1896 */ 1897 chan->xfer_len = 1898 max_hc_xfer_size - chan->max_packet + 1; 1899 } 1900 1901 if (chan->xfer_len > 0) { 1902 num_packets = (chan->xfer_len + chan->max_packet - 1) / 1903 chan->max_packet; 1904 if (num_packets > max_hc_pkt_count) { 1905 num_packets = max_hc_pkt_count; 1906 chan->xfer_len = num_packets * chan->max_packet; 1907 } 1908 } else { 1909 /* Need 1 packet for transfer length of 0 */ 1910 num_packets = 1; 1911 } 1912 1913 if (chan->ep_is_in) 1914 /* 1915 * Always program an integral # of max packets for IN 1916 * transfers 1917 */ 1918 chan->xfer_len = num_packets * chan->max_packet; 1919 1920 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1921 chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1922 /* 1923 * Make sure that the multi_count field matches the 1924 * actual transfer length 1925 */ 1926 chan->multi_count = num_packets; 1927 1928 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1929 dwc2_set_pid_isoc(chan); 1930 1931 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & 1932 TSIZ_XFERSIZE_MASK; 1933 1934 /* The ec_mc gets the multi_count for non-split */ 1935 ec_mc = chan->multi_count; 1936 } 1937 1938 chan->start_pkt_count = num_packets; 1939 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; 1940 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & 1941 TSIZ_SC_MC_PID_MASK; 1942 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); 1943 if (dbg_hc(chan)) { 1944 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", 1945 hctsiz, chan->hc_num); 1946 1947 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1948 chan->hc_num); 1949 dev_vdbg(hsotg->dev, " Xfer Size: %d\n", 1950 (hctsiz & TSIZ_XFERSIZE_MASK) >> 1951 TSIZ_XFERSIZE_SHIFT); 1952 
dev_vdbg(hsotg->dev, " Num Pkts: %d\n", 1953 (hctsiz & TSIZ_PKTCNT_MASK) >> 1954 TSIZ_PKTCNT_SHIFT); 1955 dev_vdbg(hsotg->dev, " Start PID: %d\n", 1956 (hctsiz & TSIZ_SC_MC_PID_MASK) >> 1957 TSIZ_SC_MC_PID_SHIFT); 1958 } 1959 1960 if (hsotg->core_params->dma_enable > 0) { 1961 dma_addr_t dma_addr; 1962 1963 if (chan->align_buf) { 1964 if (dbg_hc(chan)) 1965 dev_vdbg(hsotg->dev, "align_buf\n"); 1966 dma_addr = chan->align_buf; 1967 } else { 1968 dma_addr = chan->xfer_dma; 1969 } 1970 dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); 1971 if (dbg_hc(chan)) 1972 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", 1973 (unsigned long)dma_addr, chan->hc_num); 1974 } 1975 1976 /* Start the split */ 1977 if (chan->do_split) { 1978 u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num)); 1979 1980 hcsplt |= HCSPLT_SPLTENA; 1981 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num)); 1982 } 1983 1984 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); 1985 hcchar &= ~HCCHAR_MULTICNT_MASK; 1986 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; 1987 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 1988 1989 if (hcchar & HCCHAR_CHDIS) 1990 dev_warn(hsotg->dev, 1991 "%s: chdis set, channel %d, hcchar 0x%08x\n", 1992 __func__, chan->hc_num, hcchar); 1993 1994 /* Set host channel enable after all other setup is complete */ 1995 hcchar |= HCCHAR_CHENA; 1996 hcchar &= ~HCCHAR_CHDIS; 1997 1998 if (dbg_hc(chan)) 1999 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", 2000 (hcchar & HCCHAR_MULTICNT_MASK) >> 2001 HCCHAR_MULTICNT_SHIFT); 2002 2003 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 2004 if (dbg_hc(chan)) 2005 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, 2006 chan->hc_num); 2007 2008 chan->xfer_started = 1; 2009 chan->requests++; 2010 2011 if (hsotg->core_params->dma_enable <= 0 && 2012 !chan->ep_is_in && chan->xfer_len > 0) 2013 /* Load OUT packet into the appropriate Tx FIFO */ 2014 dwc2_hc_write_packet(hsotg, chan); 
}

/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Make the descriptor list visible to the controller before HCDMA
	 * is programmed with its address */
	dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
				   chan->desc_list_sz, DMA_TO_DEVICE);

	dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
			 &chan->desc_list_addr, chan->hc_num);

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}

/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		/* Periodic endpoints need the even/odd frame bit updated
		 * before queuing the next packet */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = dwc2_readl(hsotg->regs +
						HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	return 0;
}

/**
 * dwc2_hc_do_ping() - Starts a PING transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. The Do Ping bit is set in
 * the HCTSIZ register, then the channel is enabled.
 */
void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	/* Program HCTSIZ with the Do Ping bit and a packet count of 1 */
	hctsiz = TSIZ_DOPNG;
	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Enable the channel to issue the PING */
	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;
	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
}

/**
 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
 * the HFIR register according to PHY type and speed
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: The caller can modify the value of the HFIR register only after the
 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
 * has been set
 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	hprt0 = dwc2_readl(hsotg->regs + HPRT0);

	/*
	 * Select the PHY clock based on the PHY interface configuration in
	 * GUSBCFG and the full-speed PHY type reported by the hardware.
	 * NOTE(review): clock presumably is the PHY clock in MHz, so that the
	 * returned interval is 125 us (HS micro-frame) or 1 ms (FS/LS frame)
	 * expressed in PHY clocks -- confirm against the HFIR description.
	 */
	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case */
		return 125 * clock;
	else
		/* FS/LS case */
		return 1000 * clock;
}

/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest: Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	/* Rounds up to whole words; dest must have room for word_count * 4
	 * bytes */
	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = dwc2_readl(fifo);
}

/**
 * dwc2_dump_host_registers() - Prints the host registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;
	int i;

	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = hsotg->regs + HCFG;
	dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HFIR;
	dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HFNUM;
	dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HAINT;
	dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	/* HFLBADDR only exists in Descriptor DMA mode */
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = hsotg->regs + HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
	}

	addr = hsotg->regs + HPRT0;
	dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));

	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = hsotg->regs + HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCINT(i);
		dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = hsotg->regs + HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
				(unsigned long)addr, dwc2_readl(addr));
		}
	}
#endif
}

/**
 * dwc2_dump_global_registers() - Prints the core global registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;

	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = hsotg->regs + GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GGPIO;
	dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GUID;
	dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));

	addr = hsotg->regs + PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
#endif
}

/**
 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 * @num: Tx FIFO to flush
 */
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);

	/* Request the flush of FIFO 'num' via GRSTCTL */
	greset = GRSTCTL_TXFFLSH;
	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);

	/* Busy-wait (up to ~10 ms) for the controller to clear the flush
	 * bit */
	do {
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev,
				 "%s() HANG!
GRSTCTL=%0x GNPTXSTS=0x%08x\n", 2474 __func__, greset, 2475 dwc2_readl(hsotg->regs + GNPTXSTS)); 2476 break; 2477 } 2478 udelay(1); 2479 } while (greset & GRSTCTL_TXFFLSH); 2480 2481 /* Wait for at least 3 PHY Clocks */ 2482 udelay(1); 2483 } 2484 2485 /** 2486 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO 2487 * 2488 * @hsotg: Programming view of DWC_otg controller 2489 */ 2490 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) 2491 { 2492 u32 greset; 2493 int count = 0; 2494 2495 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2496 2497 greset = GRSTCTL_RXFFLSH; 2498 dwc2_writel(greset, hsotg->regs + GRSTCTL); 2499 2500 do { 2501 greset = dwc2_readl(hsotg->regs + GRSTCTL); 2502 if (++count > 10000) { 2503 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n", 2504 __func__, greset); 2505 break; 2506 } 2507 udelay(1); 2508 } while (greset & GRSTCTL_RXFFLSH); 2509 2510 /* Wait for at least 3 PHY Clocks */ 2511 udelay(1); 2512 } 2513 2514 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) 2515 2516 /* Parameter access functions */ 2517 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) 2518 { 2519 int valid = 1; 2520 2521 switch (val) { 2522 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: 2523 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) 2524 valid = 0; 2525 break; 2526 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: 2527 switch (hsotg->hw_params.op_mode) { 2528 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2529 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2530 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2531 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2532 break; 2533 default: 2534 valid = 0; 2535 break; 2536 } 2537 break; 2538 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: 2539 /* always valid */ 2540 break; 2541 default: 2542 valid = 0; 2543 break; 2544 } 2545 2546 if (!valid) { 2547 if (val >= 0) 2548 dev_err(hsotg->dev, 2549 "%d invalid for otg_cap parameter. 
Check HW configuration.\n", 2550 val); 2551 switch (hsotg->hw_params.op_mode) { 2552 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2553 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 2554 break; 2555 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2556 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2557 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2558 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 2559 break; 2560 default: 2561 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 2562 break; 2563 } 2564 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); 2565 } 2566 2567 hsotg->core_params->otg_cap = val; 2568 } 2569 2570 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) 2571 { 2572 int valid = 1; 2573 2574 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) 2575 valid = 0; 2576 if (val < 0) 2577 valid = 0; 2578 2579 if (!valid) { 2580 if (val >= 0) 2581 dev_err(hsotg->dev, 2582 "%d invalid for dma_enable parameter. Check HW configuration.\n", 2583 val); 2584 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; 2585 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); 2586 } 2587 2588 hsotg->core_params->dma_enable = val; 2589 } 2590 2591 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) 2592 { 2593 int valid = 1; 2594 2595 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2596 !hsotg->hw_params.dma_desc_enable)) 2597 valid = 0; 2598 if (val < 0) 2599 valid = 0; 2600 2601 if (!valid) { 2602 if (val >= 0) 2603 dev_err(hsotg->dev, 2604 "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", 2605 val); 2606 val = (hsotg->core_params->dma_enable > 0 && 2607 hsotg->hw_params.dma_desc_enable); 2608 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); 2609 } 2610 2611 hsotg->core_params->dma_desc_enable = val; 2612 } 2613 2614 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) 2615 { 2616 int valid = 1; 2617 2618 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2619 !hsotg->hw_params.dma_desc_enable)) 2620 valid = 0; 2621 if (val < 0) 2622 valid = 0; 2623 2624 if (!valid) { 2625 if (val >= 0) 2626 dev_err(hsotg->dev, 2627 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", 2628 val); 2629 val = (hsotg->core_params->dma_enable > 0 && 2630 hsotg->hw_params.dma_desc_enable); 2631 } 2632 2633 hsotg->core_params->dma_desc_fs_enable = val; 2634 dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); 2635 } 2636 2637 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, 2638 int val) 2639 { 2640 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2641 if (val >= 0) { 2642 dev_err(hsotg->dev, 2643 "Wrong value for host_support_fs_low_power\n"); 2644 dev_err(hsotg->dev, 2645 "host_support_fs_low_power must be 0 or 1\n"); 2646 } 2647 val = 0; 2648 dev_dbg(hsotg->dev, 2649 "Setting host_support_fs_low_power to %d\n", val); 2650 } 2651 2652 hsotg->core_params->host_support_fs_ls_low_power = val; 2653 } 2654 2655 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) 2656 { 2657 int valid = 1; 2658 2659 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) 2660 valid = 0; 2661 if (val < 0) 2662 valid = 0; 2663 2664 if (!valid) { 2665 if (val >= 0) 2666 dev_err(hsotg->dev, 2667 "%d invalid for enable_dynamic_fifo parameter. 
Check HW configuration.\n", 2668 val); 2669 val = hsotg->hw_params.enable_dynamic_fifo; 2670 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); 2671 } 2672 2673 hsotg->core_params->enable_dynamic_fifo = val; 2674 } 2675 2676 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2677 { 2678 int valid = 1; 2679 2680 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) 2681 valid = 0; 2682 2683 if (!valid) { 2684 if (val >= 0) 2685 dev_err(hsotg->dev, 2686 "%d invalid for host_rx_fifo_size. Check HW configuration.\n", 2687 val); 2688 val = hsotg->hw_params.host_rx_fifo_size; 2689 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); 2690 } 2691 2692 hsotg->core_params->host_rx_fifo_size = val; 2693 } 2694 2695 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2696 { 2697 int valid = 1; 2698 2699 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) 2700 valid = 0; 2701 2702 if (!valid) { 2703 if (val >= 0) 2704 dev_err(hsotg->dev, 2705 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", 2706 val); 2707 val = hsotg->hw_params.host_nperio_tx_fifo_size; 2708 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", 2709 val); 2710 } 2711 2712 hsotg->core_params->host_nperio_tx_fifo_size = val; 2713 } 2714 2715 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2716 { 2717 int valid = 1; 2718 2719 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) 2720 valid = 0; 2721 2722 if (!valid) { 2723 if (val >= 0) 2724 dev_err(hsotg->dev, 2725 "%d invalid for host_perio_tx_fifo_size. 
Check HW configuration.\n", 2726 val); 2727 val = hsotg->hw_params.host_perio_tx_fifo_size; 2728 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", 2729 val); 2730 } 2731 2732 hsotg->core_params->host_perio_tx_fifo_size = val; 2733 } 2734 2735 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) 2736 { 2737 int valid = 1; 2738 2739 if (val < 2047 || val > hsotg->hw_params.max_transfer_size) 2740 valid = 0; 2741 2742 if (!valid) { 2743 if (val >= 0) 2744 dev_err(hsotg->dev, 2745 "%d invalid for max_transfer_size. Check HW configuration.\n", 2746 val); 2747 val = hsotg->hw_params.max_transfer_size; 2748 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); 2749 } 2750 2751 hsotg->core_params->max_transfer_size = val; 2752 } 2753 2754 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) 2755 { 2756 int valid = 1; 2757 2758 if (val < 15 || val > hsotg->hw_params.max_packet_count) 2759 valid = 0; 2760 2761 if (!valid) { 2762 if (val >= 0) 2763 dev_err(hsotg->dev, 2764 "%d invalid for max_packet_count. Check HW configuration.\n", 2765 val); 2766 val = hsotg->hw_params.max_packet_count; 2767 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); 2768 } 2769 2770 hsotg->core_params->max_packet_count = val; 2771 } 2772 2773 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) 2774 { 2775 int valid = 1; 2776 2777 if (val < 1 || val > hsotg->hw_params.host_channels) 2778 valid = 0; 2779 2780 if (!valid) { 2781 if (val >= 0) 2782 dev_err(hsotg->dev, 2783 "%d invalid for host_channels. 
Check HW configuration.\n", 2784 val); 2785 val = hsotg->hw_params.host_channels; 2786 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); 2787 } 2788 2789 hsotg->core_params->host_channels = val; 2790 } 2791 2792 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) 2793 { 2794 int valid = 0; 2795 u32 hs_phy_type, fs_phy_type; 2796 2797 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, 2798 DWC2_PHY_TYPE_PARAM_ULPI)) { 2799 if (val >= 0) { 2800 dev_err(hsotg->dev, "Wrong value for phy_type\n"); 2801 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); 2802 } 2803 2804 valid = 0; 2805 } 2806 2807 hs_phy_type = hsotg->hw_params.hs_phy_type; 2808 fs_phy_type = hsotg->hw_params.fs_phy_type; 2809 if (val == DWC2_PHY_TYPE_PARAM_UTMI && 2810 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2811 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2812 valid = 1; 2813 else if (val == DWC2_PHY_TYPE_PARAM_ULPI && 2814 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || 2815 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2816 valid = 1; 2817 else if (val == DWC2_PHY_TYPE_PARAM_FS && 2818 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2819 valid = 1; 2820 2821 if (!valid) { 2822 if (val >= 0) 2823 dev_err(hsotg->dev, 2824 "%d invalid for phy_type. 
Check HW configuration.\n",
				val);
		/* Fall back to the best PHY the hardware reports: prefer a
		 * high-speed UTMI/ULPI PHY, else the dedicated FS PHY */
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
	}

	hsotg->core_params->phy_type = val;
}

/* Returns the phy_type value chosen by dwc2_set_param_phy_type() */
static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}

void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
			dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
		}
		valid = 0;
	}

	/* High speed is not possible on a full-speed-only PHY */
	if (val == DWC2_SPEED_PARAM_HIGH &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for speed parameter. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
	}

	hsotg->core_params->speed = val;
}

void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_ls_low_power_phy_clk parameter\n");
			dev_err(hsotg->dev,
				"host_ls_low_power_phy_clk must be 0 or 1\n");
		}
		valid = 0;
	}

	/* The 48 MHz option is not valid with a dedicated full-speed PHY */
	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
			?
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 2900 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; 2901 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", 2902 val); 2903 } 2904 2905 hsotg->core_params->host_ls_low_power_phy_clk = val; 2906 } 2907 2908 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) 2909 { 2910 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2911 if (val >= 0) { 2912 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); 2913 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); 2914 } 2915 val = 0; 2916 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); 2917 } 2918 2919 hsotg->core_params->phy_ulpi_ddr = val; 2920 } 2921 2922 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) 2923 { 2924 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2925 if (val >= 0) { 2926 dev_err(hsotg->dev, 2927 "Wrong value for phy_ulpi_ext_vbus\n"); 2928 dev_err(hsotg->dev, 2929 "phy_ulpi_ext_vbus must be 0 or 1\n"); 2930 } 2931 val = 0; 2932 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); 2933 } 2934 2935 hsotg->core_params->phy_ulpi_ext_vbus = val; 2936 } 2937 2938 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) 2939 { 2940 int valid = 0; 2941 2942 switch (hsotg->hw_params.utmi_phy_data_width) { 2943 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: 2944 valid = (val == 8); 2945 break; 2946 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: 2947 valid = (val == 16); 2948 break; 2949 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: 2950 valid = (val == 8 || val == 16); 2951 break; 2952 } 2953 2954 if (!valid) { 2955 if (val >= 0) { 2956 dev_err(hsotg->dev, 2957 "%d invalid for phy_utmi_width. Check HW configuration.\n", 2958 val); 2959 } 2960 val = (hsotg->hw_params.utmi_phy_data_width == 2961 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 
8 : 16; 2962 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); 2963 } 2964 2965 hsotg->core_params->phy_utmi_width = val; 2966 } 2967 2968 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) 2969 { 2970 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2971 if (val >= 0) { 2972 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); 2973 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); 2974 } 2975 val = 0; 2976 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); 2977 } 2978 2979 hsotg->core_params->ulpi_fs_ls = val; 2980 } 2981 2982 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) 2983 { 2984 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2985 if (val >= 0) { 2986 dev_err(hsotg->dev, "Wrong value for ts_dline\n"); 2987 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); 2988 } 2989 val = 0; 2990 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); 2991 } 2992 2993 hsotg->core_params->ts_dline = val; 2994 } 2995 2996 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) 2997 { 2998 int valid = 1; 2999 3000 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3001 if (val >= 0) { 3002 dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); 3003 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); 3004 } 3005 3006 valid = 0; 3007 } 3008 3009 if (val == 1 && !(hsotg->hw_params.i2c_enable)) 3010 valid = 0; 3011 3012 if (!valid) { 3013 if (val >= 0) 3014 dev_err(hsotg->dev, 3015 "%d invalid for i2c_enable. 
Check HW configuration.\n",
				val);
		val = hsotg->hw_params.i2c_enable;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
	}

	hsotg->core_params->i2c_enable = val;
}

/*
 * Validate and set the en_multiple_tx_fifo (dedicated TX FIFOs) parameter.
 * A value of 1 is only accepted when the hardware reports dedicated-FIFO
 * support; on any invalid value the hardware default is used instead.
 * A negative value means "use the default" and is corrected silently
 * (no dev_err).
 */
void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for en_multiple_tx_fifo,\n");
			dev_err(hsotg->dev,
				"en_multiple_tx_fifo must be 0 or 1\n");
		}
		valid = 0;
	}

	/* 1 is only valid if the hardware actually has dedicated TX FIFOs */
	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.en_multiple_tx_fifo;
		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
	}

	hsotg->core_params->en_multiple_tx_fifo = val;
}

/*
 * Validate and set the reload_ctl parameter.  Enabling it (1) requires a
 * core revision of at least 2.92a; otherwise a value derived from the
 * detected core revision is used.  Negative values fall back silently.
 */
void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter reload_ctl\n", val);
			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
	}

	hsotg->core_params->reload_ctl = val;
}

/*
 * Set the GAHBCFG value.  -1 selects the default AHB configuration
 * (INCR4 burst length); any other value is used verbatim.
 */
void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
	if (val != -1)
		hsotg->core_params->ahbcfg = val;
	else
		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
						GAHBCFG_HBSTLEN_SHIFT;
}

/*
 * Validate and set the otg_ver parameter: 0 = OTG 1.3, 1 = OTG 2.0.
 * Out-of-range values fall back to 0.
 */
void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter otg_ver\n", val);
			dev_err(hsotg->dev,
				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
	}

	hsotg->core_params->otg_ver = val;
}

/* Validate and set uframe_sched (0 or 1); invalid values fall back to 1. */
static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter uframe_sched\n",
				val);
			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
		}
		val = 1;
		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
	}

	hsotg->core_params->uframe_sched = val;
}

/*
 * Validate and set external_id_pin_ctl (0 or 1); invalid values fall
 * back to 0.
 */
static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
					       int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter external_id_pin_ctl\n",
				val);
			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
	}

	hsotg->core_params->external_id_pin_ctl = val;
}

/* Validate and set hibernation (0 or 1); invalid values fall back to 0. */
static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
				       int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter hibernation\n",
				val);
			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
	}

	hsotg->core_params->hibernation = val;
}

/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core.  Each dwc2_set_param_* helper
 * validates its value and falls back to a safe default if needed.
 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			 const struct dwc2_core_params *params)
{
	dev_dbg(hsotg->dev, "%s()\n", __func__);

	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
	dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	dwc2_set_param_host_channels(hsotg, params->host_channels);
	dwc2_set_param_phy_type(hsotg, params->phy_type);
	dwc2_set_param_speed(hsotg, params->speed);
	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
	dwc2_set_param_hibernation(hsotg, params->hibernation);
}

/*
 * Forces either host or device mode if the controller is not
 * currently in that mode.
 *
 * Returns true if the mode was forced (caller is then responsible for
 * calling dwc2_clear_force_mode() when done).
 */
static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
{
	if (host && dwc2_is_host_mode(hsotg))
		return false;
	else if (!host && dwc2_is_device_mode(hsotg))
		return false;

	return dwc2_force_mode(hsotg, host);
}

/*
 * Gets host hardware parameters. Forces host mode if not currently in
 * host mode. Should be called immediately after a core soft reset in
 * order to get the reset values.
 */
static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 gnptxfsiz;
	u32 hptxfsiz;
	bool forced;

	/* Host-side parameters are irrelevant for a peripheral-only core */
	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		return;

	/* These FIFO size registers must be read while in host mode */
	forced = dwc2_force_mode_if_needed(hsotg, true);

	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);

	/* Restore the original mode if it was forced above */
	if (forced)
		dwc2_clear_force_mode(hsotg);

	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				       FIFOSIZE_DEPTH_SHIFT;
	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;
}

/*
 * Gets device hardware parameters. Forces device mode if not
 * currently in device mode. Should be called immediately after a core
 * soft reset in order to get the reset values.
 */
static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	bool forced;
	u32 gnptxfsiz;

	/* Device-side parameters are irrelevant for a host-only core */
	if (hsotg->dr_mode == USB_DR_MODE_HOST)
		return;

	/* GNPTXFSIZ must be read while in device mode for this value */
	forced = dwc2_force_mode_if_needed(hsotg, false);

	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);

	/* Restore the original mode if it was forced above */
	if (forced)
		dwc2_clear_force_mode(hsotg);

	hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;
}

/**
 * During device initialization, read various hardware configuration
 * registers and interpret the contents.
 *
 * This should be called during driver probe. It will perform a core
 * soft reset in order to get the reset values of the parameters.
 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	unsigned width;
	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
	u32 grxfsiz;
	int retval;

	/*
	 * Attempt to ensure this device is really a DWC_otg Controller.
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
	 * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
	 */
	hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
			hw->snpsid);
		return -ENODEV;
	}

	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

	/* Soft reset so the GHWCFGn/GRXFSIZ registers hold reset values */
	retval = dwc2_core_reset(hsotg);
	if (retval)
		return retval;

	hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
	hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
	hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
	hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);

	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

	/*
	 * Host specific hardware parameters. Reading these parameters
	 * requires the controller to be in host mode. The mode will
	 * be forced, if necessary, to read these values.
	 */
	dwc2_get_host_hwparams(hsotg);
	dwc2_get_dev_hwparams(hsotg);

	/* hwcfg1 */
	hw->dev_ep_dirs = hwcfg1;

	/* hwcfg2 */
	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		      GHWCFG2_OP_MODE_SHIFT;
	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
		   GHWCFG2_ARCHITECTURE_SHIFT;
	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
				 GHWCFG2_NUM_HOST_CHAN_SHIFT);
	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
			  GHWCFG2_HS_PHY_TYPE_SHIFT;
	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
			  GHWCFG2_FS_PHY_TYPE_SHIFT;
	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
			 GHWCFG2_NUM_DEV_EP_SHIFT;
	/*
	 * NOTE: shifts are left-associative, so the next two expressions
	 * parse as ((hwcfg2 & MASK) >> SHIFT) << 1, i.e. the raw field
	 * value doubled -- presumably the field encodes depth/2; confirm
	 * against the controller databook.
	 */
	hw->nperio_tx_q_depth =
		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->host_perio_tx_q_depth =
		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->dev_token_q_depth =
		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

	/* hwcfg3 */
	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_transfer_size = (1 << (width + 11)) - 1;
	/*
	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
	 * coherent buffers with this size, and if it's too large we can
	 * exhaust the coherent DMA pool.
	 */
	if (hw->max_transfer_size > 65535)
		hw->max_transfer_size = 65535;
	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_packet_count = (1 << (width + 4)) - 1;
	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
			      GHWCFG3_DFIFO_DEPTH_SHIFT;

	/* hwcfg4 */
	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

	/* fifo sizes */
	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
				GRXFSIZ_DEPTH_SHIFT;

	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
	dev_dbg(hsotg->dev, "  op_mode=%d\n",
		hw->op_mode);
	dev_dbg(hsotg->dev, "  arch=%d\n",
		hw->arch);
	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
		hw->dma_desc_enable);
	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
		hw->power_optimized);
	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
		hw->i2c_enable);
	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
		hw->hs_phy_type);
	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
		hw->fs_phy_type);
	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
		hw->utmi_phy_data_width);
	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
		hw->num_dev_ep);
	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
		hw->num_dev_perio_in_ep);
	dev_dbg(hsotg->dev, "  host_channels=%d\n",
		hw->host_channels);
	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
		hw->max_transfer_size);
	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
		hw->max_packet_count);
	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
		hw->nperio_tx_q_depth);
	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
		hw->host_perio_tx_q_depth);
	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
		hw->dev_token_q_depth);
	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
		hw->enable_dynamic_fifo);
	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
		hw->en_multiple_tx_fifo);
	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
		hw->total_fifo_size);
	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
		hw->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
		hw->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
		hw->host_perio_tx_fifo_size);
	dev_dbg(hsotg->dev, "\n");

	return 0;
}

/*
 * Sets all parameters to the given value.
 *
 * Assumes that the dwc2_core_params struct contains only integers, so
 * the struct can be written through an int pointer.
 */
void dwc2_set_all_params(struct dwc2_core_params *params, int value)
{
	int *p = (int *)params;
	size_t size = sizeof(*params) / sizeof(*p);
	int i;

	for (i = 0; i < size; i++)
		p[i] = value;
}

/* Returns the OTG revision in BCD: 0x0200 for OTG 2.0, else 0x0103 (1.3). */
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->otg_ver == 1 ?
0x0200 : 0x0103; 3462 } 3463 3464 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) 3465 { 3466 if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff) 3467 return false; 3468 else 3469 return true; 3470 } 3471 3472 /** 3473 * dwc2_enable_global_interrupts() - Enables the controller's Global 3474 * Interrupt in the AHB Config register 3475 * 3476 * @hsotg: Programming view of DWC_otg controller 3477 */ 3478 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg) 3479 { 3480 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); 3481 3482 ahbcfg |= GAHBCFG_GLBL_INTR_EN; 3483 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); 3484 } 3485 3486 /** 3487 * dwc2_disable_global_interrupts() - Disables the controller's Global 3488 * Interrupt in the AHB Config register 3489 * 3490 * @hsotg: Programming view of DWC_otg controller 3491 */ 3492 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) 3493 { 3494 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); 3495 3496 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 3497 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); 3498 } 3499 3500 /* Returns the controller's GHWCFG2.OTG_MODE. */ 3501 unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg) 3502 { 3503 u32 ghwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); 3504 3505 return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >> 3506 GHWCFG2_OP_MODE_SHIFT; 3507 } 3508 3509 /* Returns true if the controller is capable of DRD. */ 3510 bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg) 3511 { 3512 unsigned op_mode = dwc2_op_mode(hsotg); 3513 3514 return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) || 3515 (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) || 3516 (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE); 3517 } 3518 3519 /* Returns true if the controller is host-only. 
*/ 3520 bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg) 3521 { 3522 unsigned op_mode = dwc2_op_mode(hsotg); 3523 3524 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) || 3525 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST); 3526 } 3527 3528 /* Returns true if the controller is device-only. */ 3529 bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg) 3530 { 3531 unsigned op_mode = dwc2_op_mode(hsotg); 3532 3533 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || 3534 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE); 3535 } 3536 3537 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core"); 3538 MODULE_AUTHOR("Synopsys, Inc."); 3539 MODULE_LICENSE("Dual BSD/GPL"); 3540