1 /* 2 * core.c - DesignWare HS OTG Controller common routines 3 * 4 * Copyright (C) 2004-2013 Synopsys, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The names of the above-listed copyright holders may not be used 16 * to endorse or promote products derived from this software without 17 * specific prior written permission. 18 * 19 * ALTERNATIVELY, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") as published by the Free Software 21 * Foundation; either version 2 of the License, or (at your option) any 22 * later version. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 always (kept non-void so callers can share error handling
 * with the device-mode backup path).
 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	/* One interrupt mask per implemented host channel */
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));

	/*
	 * HPRT0 is read through the dwc2_read_hprt0() helper rather than a
	 * raw dwc2_readl() — presumably to avoid capturing write-1-to-clear
	 * status bits in the snapshot; confirm against the helper.
	 */
	hr->hprt0 = dwc2_read_hprt0(hsotg);
	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
	/* Mark the snapshot usable for dwc2_restore_host_registers() */
	hr->valid = true;

	return 0;
}

/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the usb bus, host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -EINVAL if no valid backup snapshot exists.
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* A snapshot is consumed by the first restore */
	hr->valid = false;

	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
	dwc2_writel(hr->hfir, hsotg->regs + HFIR);
	/* Frame numbering restarts from zero after a power cycle */
	hsotg->frame_number = 0;

	return 0;
}
#else
/* Host mode not compiled in: backup/restore become cheap no-ops */
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 always.
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));

		/*
		 * Ensure DATA PID is correctly configured: record the current
		 * DPID as the corresponding SETDxPID command bit so the
		 * restore write re-arms the same toggle state.
		 */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured (as above) */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
	}
	/* Mark the snapshot usable for dwc2_restore_device_registers() */
	dr->valid = true;
	return 0;
}

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the usb bus, device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -EINVAL if no valid backup snapshot exists.
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* A snapshot is consumed by the first restore */
	dr->valid = false;

	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
/* Peripheral mode not compiled in: backup/restore become cheap no-ops */
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 always.
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
	/* All device periodic TX FIFO sizes, regardless of active EP count */
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));

	/* Mark the snapshot usable for dwc2_restore_global_registers() */
	gr->valid = true;
	return 0;
}

/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the usb bus, global registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, -EINVAL if no valid backup snapshot exists.
 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = &hsotg->gr_backup;
	if (!gr->valid) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* A snapshot is consumed by the first restore */
	gr->valid = false;

	/* Ack every pending interrupt before restoring the masks below */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
	dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
	dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
	dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
	dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
	dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
	dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
	dwc2_writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
	dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		dwc2_writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));

	return 0;
}

/**
 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @restore: Controller registers need to be restored
 *
 * Return: 0 on success, -ENOTSUPP if the hibernation parameter is not
 * enabled, or a negative error code propagated from the restore helpers.
 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/*
	 * Unwind the power-down sequence one bit at a time, each with its
	 * own read-modify-write: restart the PHY clock, release the power
	 * clamp, then take the module out of reset.
	 */
	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	/* Let the core settle before touching any other registers */
	udelay(100);
	if (restore) {
		/* Globals first, then the mode-specific register set */
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}
		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}

/**
 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
366 * 367 * @hsotg: Programming view of the DWC_otg controller 368 */ 369 int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) 370 { 371 u32 pcgcctl; 372 int ret = 0; 373 374 if (!hsotg->core_params->hibernation) 375 return -ENOTSUPP; 376 377 /* Backup all registers */ 378 ret = dwc2_backup_global_registers(hsotg); 379 if (ret) { 380 dev_err(hsotg->dev, "%s: failed to backup global registers\n", 381 __func__); 382 return ret; 383 } 384 385 if (dwc2_is_host_mode(hsotg)) { 386 ret = dwc2_backup_host_registers(hsotg); 387 if (ret) { 388 dev_err(hsotg->dev, "%s: failed to backup host registers\n", 389 __func__); 390 return ret; 391 } 392 } else { 393 ret = dwc2_backup_device_registers(hsotg); 394 if (ret) { 395 dev_err(hsotg->dev, "%s: failed to backup device registers\n", 396 __func__); 397 return ret; 398 } 399 } 400 401 /* 402 * Clear any pending interrupts since dwc2 will not be able to 403 * clear them after entering hibernation. 404 */ 405 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS); 406 407 /* Put the controller in low power state */ 408 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL); 409 410 pcgcctl |= PCGCTL_PWRCLMP; 411 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL); 412 ndelay(20); 413 414 pcgcctl |= PCGCTL_RSTPDWNMODULE; 415 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL); 416 ndelay(20); 417 418 pcgcctl |= PCGCTL_STOPPCLK; 419 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL); 420 421 return ret; 422 } 423 424 /** 425 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, 426 * used in both device and host modes 427 * 428 * @hsotg: Programming view of the DWC_otg controller 429 */ 430 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) 431 { 432 u32 intmsk; 433 434 /* Clear any pending OTG Interrupts */ 435 dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); 436 437 /* Clear any pending interrupts */ 438 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS); 439 440 /* Enable the interrupts in the GINTMSK */ 441 intmsk = GINTSTS_MODEMIS | 
GINTSTS_OTGINT; 442 443 if (hsotg->core_params->dma_enable <= 0) 444 intmsk |= GINTSTS_RXFLVL; 445 if (hsotg->core_params->external_id_pin_ctl <= 0) 446 intmsk |= GINTSTS_CONIDSTSCHNG; 447 448 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | 449 GINTSTS_SESSREQINT; 450 451 dwc2_writel(intmsk, hsotg->regs + GINTMSK); 452 } 453 454 /* 455 * Initializes the FSLSPClkSel field of the HCFG register depending on the 456 * PHY type 457 */ 458 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) 459 { 460 u32 hcfg, val; 461 462 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 463 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 464 hsotg->core_params->ulpi_fs_ls > 0) || 465 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 466 /* Full speed PHY */ 467 val = HCFG_FSLSPCLKSEL_48_MHZ; 468 } else { 469 /* High speed PHY running at full speed or high speed */ 470 val = HCFG_FSLSPCLKSEL_30_60_MHZ; 471 } 472 473 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); 474 hcfg = dwc2_readl(hsotg->regs + HCFG); 475 hcfg &= ~HCFG_FSLSPCLKSEL_MASK; 476 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; 477 dwc2_writel(hcfg, hsotg->regs + HCFG); 478 } 479 480 /* 481 * Do core a soft reset of the core. Be careful with this because it 482 * resets all the internal state machines of the core. 483 */ 484 static int dwc2_core_reset(struct dwc2_hsotg *hsotg) 485 { 486 u32 greset; 487 int count = 0; 488 u32 gusbcfg; 489 490 dev_vdbg(hsotg->dev, "%s()\n", __func__); 491 492 /* Wait for AHB master IDLE state */ 493 do { 494 usleep_range(20000, 40000); 495 greset = dwc2_readl(hsotg->regs + GRSTCTL); 496 if (++count > 50) { 497 dev_warn(hsotg->dev, 498 "%s() HANG! 
AHB Idle GRSTCTL=%0x\n", 499 __func__, greset); 500 return -EBUSY; 501 } 502 } while (!(greset & GRSTCTL_AHBIDLE)); 503 504 /* Core Soft Reset */ 505 count = 0; 506 greset |= GRSTCTL_CSFTRST; 507 dwc2_writel(greset, hsotg->regs + GRSTCTL); 508 do { 509 usleep_range(20000, 40000); 510 greset = dwc2_readl(hsotg->regs + GRSTCTL); 511 if (++count > 50) { 512 dev_warn(hsotg->dev, 513 "%s() HANG! Soft Reset GRSTCTL=%0x\n", 514 __func__, greset); 515 return -EBUSY; 516 } 517 } while (greset & GRSTCTL_CSFTRST); 518 519 if (hsotg->dr_mode == USB_DR_MODE_HOST) { 520 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 521 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 522 gusbcfg |= GUSBCFG_FORCEHOSTMODE; 523 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 524 } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { 525 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 526 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 527 gusbcfg |= GUSBCFG_FORCEDEVMODE; 528 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 529 } else if (hsotg->dr_mode == USB_DR_MODE_OTG) { 530 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 531 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 532 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 533 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 534 } 535 536 /* 537 * NOTE: This long sleep is _very_ important, otherwise the core will 538 * not stay in host mode after a connector ID change! 
539 */ 540 usleep_range(150000, 200000); 541 542 return 0; 543 } 544 545 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 546 { 547 u32 usbcfg, i2cctl; 548 int retval = 0; 549 550 /* 551 * core_init() is now called on every switch so only call the 552 * following for the first time through 553 */ 554 if (select_phy) { 555 dev_dbg(hsotg->dev, "FS PHY selected\n"); 556 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 557 usbcfg |= GUSBCFG_PHYSEL; 558 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 559 560 /* Reset after a PHY select */ 561 retval = dwc2_core_reset(hsotg); 562 if (retval) { 563 dev_err(hsotg->dev, "%s() Reset failed, aborting", 564 __func__); 565 return retval; 566 } 567 } 568 569 /* 570 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also 571 * do this on HNP Dev/Host mode switches (done in dev_init and 572 * host_init). 573 */ 574 if (dwc2_is_host_mode(hsotg)) 575 dwc2_init_fs_ls_pclk_sel(hsotg); 576 577 if (hsotg->core_params->i2c_enable > 0) { 578 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); 579 580 /* Program GUSBCFG.OtgUtmiFsSel to I2C */ 581 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 582 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; 583 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 584 585 /* Program GI2CCTL.I2CEn */ 586 i2cctl = dwc2_readl(hsotg->regs + GI2CCTL); 587 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; 588 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; 589 i2cctl &= ~GI2CCTL_I2CEN; 590 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); 591 i2cctl |= GI2CCTL_I2CEN; 592 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); 593 } 594 595 return retval; 596 } 597 598 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 599 { 600 u32 usbcfg; 601 int retval = 0; 602 603 if (!select_phy) 604 return 0; 605 606 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 607 608 /* 609 * HS PHY parameters. These parameters are preserved during soft reset 610 * so only program the first time. Do a soft reset immediately after 611 * setting phyif. 
612 */ 613 switch (hsotg->core_params->phy_type) { 614 case DWC2_PHY_TYPE_PARAM_ULPI: 615 /* ULPI interface */ 616 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); 617 usbcfg |= GUSBCFG_ULPI_UTMI_SEL; 618 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); 619 if (hsotg->core_params->phy_ulpi_ddr > 0) 620 usbcfg |= GUSBCFG_DDRSEL; 621 break; 622 case DWC2_PHY_TYPE_PARAM_UTMI: 623 /* UTMI+ interface */ 624 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); 625 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); 626 if (hsotg->core_params->phy_utmi_width == 16) 627 usbcfg |= GUSBCFG_PHYIF16; 628 break; 629 default: 630 dev_err(hsotg->dev, "FS PHY selected at HS!\n"); 631 break; 632 } 633 634 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 635 636 /* Reset after setting the PHY parameters */ 637 retval = dwc2_core_reset(hsotg); 638 if (retval) { 639 dev_err(hsotg->dev, "%s() Reset failed, aborting", 640 __func__); 641 return retval; 642 } 643 644 return retval; 645 } 646 647 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 648 { 649 u32 usbcfg; 650 int retval = 0; 651 652 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && 653 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 654 /* If FS mode with FS PHY */ 655 retval = dwc2_fs_phy_init(hsotg, select_phy); 656 if (retval) 657 return retval; 658 } else { 659 /* High speed PHY */ 660 retval = dwc2_hs_phy_init(hsotg, select_phy); 661 if (retval) 662 return retval; 663 } 664 665 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 666 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 667 hsotg->core_params->ulpi_fs_ls > 0) { 668 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); 669 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 670 usbcfg |= GUSBCFG_ULPI_FS_LS; 671 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; 672 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 673 } else { 674 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 675 usbcfg &= ~GUSBCFG_ULPI_FS_LS; 676 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M; 677 
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 678 } 679 680 return retval; 681 } 682 683 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) 684 { 685 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); 686 687 switch (hsotg->hw_params.arch) { 688 case GHWCFG2_EXT_DMA_ARCH: 689 dev_err(hsotg->dev, "External DMA Mode not supported\n"); 690 return -EINVAL; 691 692 case GHWCFG2_INT_DMA_ARCH: 693 dev_dbg(hsotg->dev, "Internal DMA Mode\n"); 694 if (hsotg->core_params->ahbcfg != -1) { 695 ahbcfg &= GAHBCFG_CTRL_MASK; 696 ahbcfg |= hsotg->core_params->ahbcfg & 697 ~GAHBCFG_CTRL_MASK; 698 } 699 break; 700 701 case GHWCFG2_SLAVE_ONLY_ARCH: 702 default: 703 dev_dbg(hsotg->dev, "Slave Only Mode\n"); 704 break; 705 } 706 707 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", 708 hsotg->core_params->dma_enable, 709 hsotg->core_params->dma_desc_enable); 710 711 if (hsotg->core_params->dma_enable > 0) { 712 if (hsotg->core_params->dma_desc_enable > 0) 713 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); 714 else 715 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); 716 } else { 717 dev_dbg(hsotg->dev, "Using Slave mode\n"); 718 hsotg->core_params->dma_desc_enable = 0; 719 } 720 721 if (hsotg->core_params->dma_enable > 0) 722 ahbcfg |= GAHBCFG_DMA_EN; 723 724 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); 725 726 return 0; 727 } 728 729 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) 730 { 731 u32 usbcfg; 732 733 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 734 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); 735 736 switch (hsotg->hw_params.op_mode) { 737 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 738 if (hsotg->core_params->otg_cap == 739 DWC2_CAP_PARAM_HNP_SRP_CAPABLE) 740 usbcfg |= GUSBCFG_HNPCAP; 741 if (hsotg->core_params->otg_cap != 742 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 743 usbcfg |= GUSBCFG_SRPCAP; 744 break; 745 746 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 747 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 748 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 749 if 
(hsotg->core_params->otg_cap != 750 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 751 usbcfg |= GUSBCFG_SRPCAP; 752 break; 753 754 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: 755 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: 756 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: 757 default: 758 break; 759 } 760 761 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 762 } 763 764 /** 765 * dwc2_core_init() - Initializes the DWC_otg controller registers and 766 * prepares the core for device mode or host mode operation 767 * 768 * @hsotg: Programming view of the DWC_otg controller 769 * @select_phy: If true then also set the Phy type 770 * @irq: If >= 0, the irq to register 771 */ 772 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq) 773 { 774 u32 usbcfg, otgctl; 775 int retval; 776 777 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 778 779 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 780 781 /* Set ULPI External VBUS bit if needed */ 782 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; 783 if (hsotg->core_params->phy_ulpi_ext_vbus == 784 DWC2_PHY_ULPI_EXTERNAL_VBUS) 785 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; 786 787 /* Set external TS Dline pulsing bit if needed */ 788 usbcfg &= ~GUSBCFG_TERMSELDLPULSE; 789 if (hsotg->core_params->ts_dline > 0) 790 usbcfg |= GUSBCFG_TERMSELDLPULSE; 791 792 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 793 794 /* Reset the Controller */ 795 retval = dwc2_core_reset(hsotg); 796 if (retval) { 797 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", 798 __func__); 799 return retval; 800 } 801 802 /* 803 * This needs to happen in FS mode before any other programming occurs 804 */ 805 retval = dwc2_phy_init(hsotg, select_phy); 806 if (retval) 807 return retval; 808 809 /* Program the GAHBCFG Register */ 810 retval = dwc2_gahbcfg_init(hsotg); 811 if (retval) 812 return retval; 813 814 /* Program the GUSBCFG register */ 815 dwc2_gusbcfg_init(hsotg); 816 817 /* Program the GOTGCTL register */ 818 otgctl = dwc2_readl(hsotg->regs + GOTGCTL); 819 otgctl &= 
~GOTGCTL_OTGVER; 820 if (hsotg->core_params->otg_ver > 0) 821 otgctl |= GOTGCTL_OTGVER; 822 dwc2_writel(otgctl, hsotg->regs + GOTGCTL); 823 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); 824 825 /* Clear the SRP success bit for FS-I2c */ 826 hsotg->srp_success = 0; 827 828 /* Enable common interrupts */ 829 dwc2_enable_common_interrupts(hsotg); 830 831 /* 832 * Do device or host initialization based on mode during PCD and 833 * HCD initialization 834 */ 835 if (dwc2_is_host_mode(hsotg)) { 836 dev_dbg(hsotg->dev, "Host Mode\n"); 837 hsotg->op_state = OTG_STATE_A_HOST; 838 } else { 839 dev_dbg(hsotg->dev, "Device Mode\n"); 840 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 841 } 842 843 return 0; 844 } 845 846 /** 847 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts 848 * 849 * @hsotg: Programming view of DWC_otg controller 850 */ 851 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) 852 { 853 u32 intmsk; 854 855 dev_dbg(hsotg->dev, "%s()\n", __func__); 856 857 /* Disable all interrupts */ 858 dwc2_writel(0, hsotg->regs + GINTMSK); 859 dwc2_writel(0, hsotg->regs + HAINTMSK); 860 861 /* Enable the common interrupts */ 862 dwc2_enable_common_interrupts(hsotg); 863 864 /* Enable host mode interrupts without disturbing common interrupts */ 865 intmsk = dwc2_readl(hsotg->regs + GINTMSK); 866 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; 867 dwc2_writel(intmsk, hsotg->regs + GINTMSK); 868 } 869 870 /** 871 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts 872 * 873 * @hsotg: Programming view of DWC_otg controller 874 */ 875 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) 876 { 877 u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK); 878 879 /* Disable host mode interrupts without disturbing common interrupts */ 880 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | 881 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT); 882 dwc2_writel(intmsk, hsotg->regs + GINTMSK); 
883 } 884 885 /* 886 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size 887 * For system that have a total fifo depth that is smaller than the default 888 * RX + TX fifo size. 889 * 890 * @hsotg: Programming view of DWC_otg controller 891 */ 892 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) 893 { 894 struct dwc2_core_params *params = hsotg->core_params; 895 struct dwc2_hw_params *hw = &hsotg->hw_params; 896 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; 897 898 total_fifo_size = hw->total_fifo_size; 899 rxfsiz = params->host_rx_fifo_size; 900 nptxfsiz = params->host_nperio_tx_fifo_size; 901 ptxfsiz = params->host_perio_tx_fifo_size; 902 903 /* 904 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth 905 * allocation with support for high bandwidth endpoints. Synopsys 906 * defines MPS(Max Packet size) for a periodic EP=1024, and for 907 * non-periodic as 512. 908 */ 909 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { 910 /* 911 * For Buffer DMA mode/Scatter Gather DMA mode 912 * 2 * ((Largest Packet size / 4) + 1 + 1) + n 913 * with n = number of host channel. 914 * 2 * ((1024/4) + 2) = 516 915 */ 916 rxfsiz = 516 + hw->host_channels; 917 918 /* 919 * min non-periodic tx fifo depth 920 * 2 * (largest non-periodic USB packet used / 4) 921 * 2 * (512/4) = 256 922 */ 923 nptxfsiz = 256; 924 925 /* 926 * min periodic tx fifo depth 927 * (largest packet size*MC)/4 928 * (1024 * 3)/4 = 768 929 */ 930 ptxfsiz = 768; 931 932 params->host_rx_fifo_size = rxfsiz; 933 params->host_nperio_tx_fifo_size = nptxfsiz; 934 params->host_perio_tx_fifo_size = ptxfsiz; 935 } 936 937 /* 938 * If the summation of RX, NPTX and PTX fifo sizes is still 939 * bigger than the total_fifo_size, then we have a problem. 940 * 941 * We won't be able to allocate as many endpoints. Right now, 942 * we're just printing an error message, but ideally this FIFO 943 * allocation algorithm would be improved in the future. 
944 * 945 * FIXME improve this FIFO allocation algorithm. 946 */ 947 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) 948 dev_err(hsotg->dev, "invalid fifo sizes\n"); 949 } 950 951 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) 952 { 953 struct dwc2_core_params *params = hsotg->core_params; 954 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; 955 956 if (!params->enable_dynamic_fifo) 957 return; 958 959 dwc2_calculate_dynamic_fifo(hsotg); 960 961 /* Rx FIFO */ 962 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); 963 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); 964 grxfsiz &= ~GRXFSIZ_DEPTH_MASK; 965 grxfsiz |= params->host_rx_fifo_size << 966 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; 967 dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ); 968 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", 969 dwc2_readl(hsotg->regs + GRXFSIZ)); 970 971 /* Non-periodic Tx FIFO */ 972 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", 973 dwc2_readl(hsotg->regs + GNPTXFSIZ)); 974 nptxfsiz = params->host_nperio_tx_fifo_size << 975 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 976 nptxfsiz |= params->host_rx_fifo_size << 977 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 978 dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ); 979 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", 980 dwc2_readl(hsotg->regs + GNPTXFSIZ)); 981 982 /* Periodic Tx FIFO */ 983 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", 984 dwc2_readl(hsotg->regs + HPTXFSIZ)); 985 hptxfsiz = params->host_perio_tx_fifo_size << 986 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 987 hptxfsiz |= (params->host_rx_fifo_size + 988 params->host_nperio_tx_fifo_size) << 989 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 990 dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ); 991 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", 992 dwc2_readl(hsotg->regs + HPTXFSIZ)); 993 994 if (hsotg->core_params->en_multiple_tx_fifo > 0 && 995 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { 996 /* 997 * Global DFIFOCFG calculation for Host mode - 998 
* include RxFIFO, NPTXFIFO and HPTXFIFO 999 */ 1000 dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG); 1001 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; 1002 dfifocfg |= (params->host_rx_fifo_size + 1003 params->host_nperio_tx_fifo_size + 1004 params->host_perio_tx_fifo_size) << 1005 GDFIFOCFG_EPINFOBASE_SHIFT & 1006 GDFIFOCFG_EPINFOBASE_MASK; 1007 dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG); 1008 } 1009 } 1010 1011 /** 1012 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for 1013 * Host mode 1014 * 1015 * @hsotg: Programming view of DWC_otg controller 1016 * 1017 * This function flushes the Tx and Rx FIFOs and flushes any entries in the 1018 * request queues. Host channels are reset to ensure that they are ready for 1019 * performing transfers. 1020 */ 1021 void dwc2_core_host_init(struct dwc2_hsotg *hsotg) 1022 { 1023 u32 hcfg, hfir, otgctl; 1024 1025 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 1026 1027 /* Restart the Phy Clock */ 1028 dwc2_writel(0, hsotg->regs + PCGCTL); 1029 1030 /* Initialize Host Configuration Register */ 1031 dwc2_init_fs_ls_pclk_sel(hsotg); 1032 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { 1033 hcfg = dwc2_readl(hsotg->regs + HCFG); 1034 hcfg |= HCFG_FSLSSUPP; 1035 dwc2_writel(hcfg, hsotg->regs + HCFG); 1036 } 1037 1038 /* 1039 * This bit allows dynamic reloading of the HFIR register during 1040 * runtime. This bit needs to be programmed during initial configuration 1041 * and its value must not be changed during runtime. 
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = dwc2_readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		dwc2_writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;

		/* Descriptor DMA requires rev >= 2.90a, hardware support,
		 * and a host-capable operating mode */
		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = dwc2_readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			dwc2_writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/*
	 * Clear Host Set HNP Enable in the OTG Control Register.
	 * NOTE(review): this repeats the clear done just before the FIFO
	 * flush above — appears defensive; see the TODO there.
	 */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			/* Poll for CHENA to clear; ~1ms worst case (1000 x
			 * udelay(1)) before giving up on the channel */
			do {
				hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}

/*
 * dwc2_hc_enable_slave_ints() - Builds and programs the HCINTMSK for a
 * channel operating in Slave (non-DMA) mode, based on the endpoint type,
 * direction, split state and error state of the transfer.
 */
static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case
 USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

/*
 * dwc2_hc_enable_dma_ints() - Builds and programs the HCINTMSK for a
 * channel operating in DMA mode. Fewer interrupts are needed than in
 * Slave mode since the controller handles most of the transfer itself.
 */
static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * For Descriptor DMA mode core halts the channel on AHB error.
	 * Interrupt is not required.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

/*
 * dwc2_hc_enable_ints() - Enables the channel-level interrupts for a
 * transfer (Slave or DMA variant), then unmasks the channel in HAINTMSK
 * and makes sure the global host channel interrupt is enabled in GINTMSK.
 */
static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg:
 Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel
	 * (all writable HCINT bits except the reserved ones) */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, " Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, " Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, " Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, " comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, " xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, " hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, " hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, " is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, " Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, " xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* hcsplt stays 0 (split disabled) for non-split transfers */
	dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
}

/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt.
 If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				/* No queue space: only disable, halt later */
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		/* Halt was issued now; completion arrives via Ch Halted irq */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		/* No queue space: defer the halt until a slot opens up */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, " halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, " halt_status: %d\n",
			 chan->halt_status);
	}
}

/**
 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to clean up
 *
 * This function is normally called after a transfer is done and the host
 * channel is being released
 */
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcintmsk;

	chan->xfer_started = 0;

	/*
	 * Clear channel interrupt enables and any unhandled channel interrupt
	 * conditions (all HCINT bits except the reserved ones)
	 */
	dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
}

/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* 1 if _next_ frame is odd, 0 if it's even */
		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
			*hcchar |= HCCHAR_ODDFRM;
	}
}
1592 1593 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) 1594 { 1595 /* Set up the initial PID for the transfer */ 1596 if (chan->speed == USB_SPEED_HIGH) { 1597 if (chan->ep_is_in) { 1598 if (chan->multi_count == 1) 1599 chan->data_pid_start = DWC2_HC_PID_DATA0; 1600 else if (chan->multi_count == 2) 1601 chan->data_pid_start = DWC2_HC_PID_DATA1; 1602 else 1603 chan->data_pid_start = DWC2_HC_PID_DATA2; 1604 } else { 1605 if (chan->multi_count == 1) 1606 chan->data_pid_start = DWC2_HC_PID_DATA0; 1607 else 1608 chan->data_pid_start = DWC2_HC_PID_MDATA; 1609 } 1610 } else { 1611 chan->data_pid_start = DWC2_HC_PID_DATA0; 1612 } 1613 } 1614 1615 /** 1616 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with 1617 * the Host Channel 1618 * 1619 * @hsotg: Programming view of DWC_otg controller 1620 * @chan: Information needed to initialize the host channel 1621 * 1622 * This function should only be called in Slave mode. For a channel associated 1623 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel 1624 * associated with a periodic EP, the periodic Tx FIFO is written. 1625 * 1626 * Upon return the xfer_buf and xfer_count fields in chan are incremented by 1627 * the number of bytes written to the Tx FIFO. 
1628 */ 1629 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, 1630 struct dwc2_host_chan *chan) 1631 { 1632 u32 i; 1633 u32 remaining_count; 1634 u32 byte_count; 1635 u32 dword_count; 1636 u32 __iomem *data_fifo; 1637 u32 *data_buf = (u32 *)chan->xfer_buf; 1638 1639 if (dbg_hc(chan)) 1640 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1641 1642 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); 1643 1644 remaining_count = chan->xfer_len - chan->xfer_count; 1645 if (remaining_count > chan->max_packet) 1646 byte_count = chan->max_packet; 1647 else 1648 byte_count = remaining_count; 1649 1650 dword_count = (byte_count + 3) / 4; 1651 1652 if (((unsigned long)data_buf & 0x3) == 0) { 1653 /* xfer_buf is DWORD aligned */ 1654 for (i = 0; i < dword_count; i++, data_buf++) 1655 dwc2_writel(*data_buf, data_fifo); 1656 } else { 1657 /* xfer_buf is not DWORD aligned */ 1658 for (i = 0; i < dword_count; i++, data_buf++) { 1659 u32 data = data_buf[0] | data_buf[1] << 8 | 1660 data_buf[2] << 16 | data_buf[3] << 24; 1661 dwc2_writel(data, data_fifo); 1662 } 1663 } 1664 1665 chan->xfer_count += byte_count; 1666 chan->xfer_buf += byte_count; 1667 } 1668 1669 /** 1670 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host 1671 * channel and starts the transfer 1672 * 1673 * @hsotg: Programming view of DWC_otg controller 1674 * @chan: Information needed to initialize the host channel. The xfer_len value 1675 * may be reduced to accommodate the max widths of the XferSize and 1676 * PktCnt fields in the HCTSIZn register. The multi_count value may be 1677 * changed to reflect the final xfer_len value. 1678 * 1679 * This function may be called in either Slave mode or DMA mode. In Slave mode, 1680 * the caller must ensure that there is sufficient space in the request queue 1681 * and Tx Data FIFO. 1682 * 1683 * For an OUT transfer in Slave mode, it loads a data packet into the 1684 * appropriate FIFO. 
 If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		/* In Slave mode the PING is issued directly and we're done */
		if (hsotg->core_params->dma_enable <= 0) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/* 188-byte cap for split OUT data — presumably the
			 * per-SSPLIT payload limit; TODO confirm vs spec */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			/* Round up to whole packets */
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}

/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hc_dma;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
	}

	dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Keep only the address bits of the descriptor list base */
	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;

	/* Always start from first descriptor */
	hc_dma &= ~HCDMA_CTD_MASK;
	dwc2_writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
			 hc_dma, chan->hc_num);

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}

/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen.
So we can't depend on the NAK interrupt 2017 * handler to requeue a NAK'd request. Instead, IN requests 2018 * are issued each time this function is called. When the 2019 * transfer completes, the extra requests for the channel will 2020 * be flushed. 2021 */ 2022 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); 2023 2024 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 2025 hcchar |= HCCHAR_CHENA; 2026 hcchar &= ~HCCHAR_CHDIS; 2027 if (dbg_hc(chan)) 2028 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", 2029 hcchar); 2030 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 2031 chan->requests++; 2032 return 1; 2033 } 2034 2035 /* OUT transfers */ 2036 2037 if (chan->xfer_count < chan->xfer_len) { 2038 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 2039 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 2040 u32 hcchar = dwc2_readl(hsotg->regs + 2041 HCCHAR(chan->hc_num)); 2042 2043 dwc2_hc_set_even_odd_frame(hsotg, chan, 2044 &hcchar); 2045 } 2046 2047 /* Load OUT packet into the appropriate Tx FIFO */ 2048 dwc2_hc_write_packet(hsotg, chan); 2049 chan->requests++; 2050 return 1; 2051 } 2052 2053 return 0; 2054 } 2055 2056 /** 2057 * dwc2_hc_do_ping() - Starts a PING transfer 2058 * 2059 * @hsotg: Programming view of DWC_otg controller 2060 * @chan: Information needed to initialize the host channel 2061 * 2062 * This function should only be called in Slave mode. The Do Ping bit is set in 2063 * the HCTSIZ register, then the channel is enabled. 
2064 */ 2065 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 2066 { 2067 u32 hcchar; 2068 u32 hctsiz; 2069 2070 if (dbg_hc(chan)) 2071 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 2072 chan->hc_num); 2073 2074 2075 hctsiz = TSIZ_DOPNG; 2076 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; 2077 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); 2078 2079 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); 2080 hcchar |= HCCHAR_CHENA; 2081 hcchar &= ~HCCHAR_CHDIS; 2082 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); 2083 } 2084 2085 /** 2086 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for 2087 * the HFIR register according to PHY type and speed 2088 * 2089 * @hsotg: Programming view of DWC_otg controller 2090 * 2091 * NOTE: The caller can modify the value of the HFIR register only after the 2092 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) 2093 * has been set 2094 */ 2095 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) 2096 { 2097 u32 usbcfg; 2098 u32 hprt0; 2099 int clock = 60; /* default value */ 2100 2101 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 2102 hprt0 = dwc2_readl(hsotg->regs + HPRT0); 2103 2104 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && 2105 !(usbcfg & GUSBCFG_PHYIF16)) 2106 clock = 60; 2107 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == 2108 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) 2109 clock = 48; 2110 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2111 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) 2112 clock = 30; 2113 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2114 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) 2115 clock = 60; 2116 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2117 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) 2118 clock = 48; 2119 if ((usbcfg & 
GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) && 2120 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) 2121 clock = 48; 2122 if ((usbcfg & GUSBCFG_PHYSEL) && 2123 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2124 clock = 48; 2125 2126 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) 2127 /* High speed case */ 2128 return 125 * clock; 2129 else 2130 /* FS/LS case */ 2131 return 1000 * clock; 2132 } 2133 2134 /** 2135 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination 2136 * buffer 2137 * 2138 * @core_if: Programming view of DWC_otg controller 2139 * @dest: Destination buffer for the packet 2140 * @bytes: Number of bytes to copy to the destination 2141 */ 2142 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) 2143 { 2144 u32 __iomem *fifo = hsotg->regs + HCFIFO(0); 2145 u32 *data_buf = (u32 *)dest; 2146 int word_count = (bytes + 3) / 4; 2147 int i; 2148 2149 /* 2150 * Todo: Account for the case where dest is not dword aligned. This 2151 * requires reading data from the FIFO into a u32 temp buffer, then 2152 * moving it into the data buffer. 
2153 */ 2154 2155 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); 2156 2157 for (i = 0; i < word_count; i++, data_buf++) 2158 *data_buf = dwc2_readl(fifo); 2159 } 2160 2161 /** 2162 * dwc2_dump_host_registers() - Prints the host registers 2163 * 2164 * @hsotg: Programming view of DWC_otg controller 2165 * 2166 * NOTE: This function will be removed once the peripheral controller code 2167 * is integrated and the driver is stable 2168 */ 2169 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) 2170 { 2171 #ifdef DEBUG 2172 u32 __iomem *addr; 2173 int i; 2174 2175 dev_dbg(hsotg->dev, "Host Global Registers\n"); 2176 addr = hsotg->regs + HCFG; 2177 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n", 2178 (unsigned long)addr, dwc2_readl(addr)); 2179 addr = hsotg->regs + HFIR; 2180 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n", 2181 (unsigned long)addr, dwc2_readl(addr)); 2182 addr = hsotg->regs + HFNUM; 2183 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n", 2184 (unsigned long)addr, dwc2_readl(addr)); 2185 addr = hsotg->regs + HPTXSTS; 2186 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n", 2187 (unsigned long)addr, dwc2_readl(addr)); 2188 addr = hsotg->regs + HAINT; 2189 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n", 2190 (unsigned long)addr, dwc2_readl(addr)); 2191 addr = hsotg->regs + HAINTMSK; 2192 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n", 2193 (unsigned long)addr, dwc2_readl(addr)); 2194 if (hsotg->core_params->dma_desc_enable > 0) { 2195 addr = hsotg->regs + HFLBADDR; 2196 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n", 2197 (unsigned long)addr, dwc2_readl(addr)); 2198 } 2199 2200 addr = hsotg->regs + HPRT0; 2201 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n", 2202 (unsigned long)addr, dwc2_readl(addr)); 2203 2204 for (i = 0; i < hsotg->core_params->host_channels; i++) { 2205 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i); 2206 addr = hsotg->regs + HCCHAR(i); 2207 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 
0x%08X\n", 2208 (unsigned long)addr, dwc2_readl(addr)); 2209 addr = hsotg->regs + HCSPLT(i); 2210 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n", 2211 (unsigned long)addr, dwc2_readl(addr)); 2212 addr = hsotg->regs + HCINT(i); 2213 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n", 2214 (unsigned long)addr, dwc2_readl(addr)); 2215 addr = hsotg->regs + HCINTMSK(i); 2216 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n", 2217 (unsigned long)addr, dwc2_readl(addr)); 2218 addr = hsotg->regs + HCTSIZ(i); 2219 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n", 2220 (unsigned long)addr, dwc2_readl(addr)); 2221 addr = hsotg->regs + HCDMA(i); 2222 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", 2223 (unsigned long)addr, dwc2_readl(addr)); 2224 if (hsotg->core_params->dma_desc_enable > 0) { 2225 addr = hsotg->regs + HCDMAB(i); 2226 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", 2227 (unsigned long)addr, dwc2_readl(addr)); 2228 } 2229 } 2230 #endif 2231 } 2232 2233 /** 2234 * dwc2_dump_global_registers() - Prints the core global registers 2235 * 2236 * @hsotg: Programming view of DWC_otg controller 2237 * 2238 * NOTE: This function will be removed once the peripheral controller code 2239 * is integrated and the driver is stable 2240 */ 2241 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg) 2242 { 2243 #ifdef DEBUG 2244 u32 __iomem *addr; 2245 2246 dev_dbg(hsotg->dev, "Core Global Registers\n"); 2247 addr = hsotg->regs + GOTGCTL; 2248 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n", 2249 (unsigned long)addr, dwc2_readl(addr)); 2250 addr = hsotg->regs + GOTGINT; 2251 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n", 2252 (unsigned long)addr, dwc2_readl(addr)); 2253 addr = hsotg->regs + GAHBCFG; 2254 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n", 2255 (unsigned long)addr, dwc2_readl(addr)); 2256 addr = hsotg->regs + GUSBCFG; 2257 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n", 2258 (unsigned long)addr, dwc2_readl(addr)); 2259 addr = hsotg->regs + GRSTCTL; 
2260 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n", 2261 (unsigned long)addr, dwc2_readl(addr)); 2262 addr = hsotg->regs + GINTSTS; 2263 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n", 2264 (unsigned long)addr, dwc2_readl(addr)); 2265 addr = hsotg->regs + GINTMSK; 2266 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n", 2267 (unsigned long)addr, dwc2_readl(addr)); 2268 addr = hsotg->regs + GRXSTSR; 2269 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n", 2270 (unsigned long)addr, dwc2_readl(addr)); 2271 addr = hsotg->regs + GRXFSIZ; 2272 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n", 2273 (unsigned long)addr, dwc2_readl(addr)); 2274 addr = hsotg->regs + GNPTXFSIZ; 2275 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n", 2276 (unsigned long)addr, dwc2_readl(addr)); 2277 addr = hsotg->regs + GNPTXSTS; 2278 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n", 2279 (unsigned long)addr, dwc2_readl(addr)); 2280 addr = hsotg->regs + GI2CCTL; 2281 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n", 2282 (unsigned long)addr, dwc2_readl(addr)); 2283 addr = hsotg->regs + GPVNDCTL; 2284 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n", 2285 (unsigned long)addr, dwc2_readl(addr)); 2286 addr = hsotg->regs + GGPIO; 2287 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n", 2288 (unsigned long)addr, dwc2_readl(addr)); 2289 addr = hsotg->regs + GUID; 2290 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n", 2291 (unsigned long)addr, dwc2_readl(addr)); 2292 addr = hsotg->regs + GSNPSID; 2293 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n", 2294 (unsigned long)addr, dwc2_readl(addr)); 2295 addr = hsotg->regs + GHWCFG1; 2296 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n", 2297 (unsigned long)addr, dwc2_readl(addr)); 2298 addr = hsotg->regs + GHWCFG2; 2299 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n", 2300 (unsigned long)addr, dwc2_readl(addr)); 2301 addr = hsotg->regs + GHWCFG3; 2302 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n", 2303 (unsigned long)addr, 
dwc2_readl(addr)); 2304 addr = hsotg->regs + GHWCFG4; 2305 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n", 2306 (unsigned long)addr, dwc2_readl(addr)); 2307 addr = hsotg->regs + GLPMCFG; 2308 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n", 2309 (unsigned long)addr, dwc2_readl(addr)); 2310 addr = hsotg->regs + GPWRDN; 2311 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n", 2312 (unsigned long)addr, dwc2_readl(addr)); 2313 addr = hsotg->regs + GDFIFOCFG; 2314 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n", 2315 (unsigned long)addr, dwc2_readl(addr)); 2316 addr = hsotg->regs + HPTXFSIZ; 2317 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n", 2318 (unsigned long)addr, dwc2_readl(addr)); 2319 2320 addr = hsotg->regs + PCGCTL; 2321 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n", 2322 (unsigned long)addr, dwc2_readl(addr)); 2323 #endif 2324 } 2325 2326 /** 2327 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO 2328 * 2329 * @hsotg: Programming view of DWC_otg controller 2330 * @num: Tx FIFO to flush 2331 */ 2332 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num) 2333 { 2334 u32 greset; 2335 int count = 0; 2336 2337 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num); 2338 2339 greset = GRSTCTL_TXFFLSH; 2340 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK; 2341 dwc2_writel(greset, hsotg->regs + GRSTCTL); 2342 2343 do { 2344 greset = dwc2_readl(hsotg->regs + GRSTCTL); 2345 if (++count > 10000) { 2346 dev_warn(hsotg->dev, 2347 "%s() HANG! 
GRSTCTL=%0x GNPTXSTS=0x%08x\n", 2348 __func__, greset, 2349 dwc2_readl(hsotg->regs + GNPTXSTS)); 2350 break; 2351 } 2352 udelay(1); 2353 } while (greset & GRSTCTL_TXFFLSH); 2354 2355 /* Wait for at least 3 PHY Clocks */ 2356 udelay(1); 2357 } 2358 2359 /** 2360 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO 2361 * 2362 * @hsotg: Programming view of DWC_otg controller 2363 */ 2364 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) 2365 { 2366 u32 greset; 2367 int count = 0; 2368 2369 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2370 2371 greset = GRSTCTL_RXFFLSH; 2372 dwc2_writel(greset, hsotg->regs + GRSTCTL); 2373 2374 do { 2375 greset = dwc2_readl(hsotg->regs + GRSTCTL); 2376 if (++count > 10000) { 2377 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n", 2378 __func__, greset); 2379 break; 2380 } 2381 udelay(1); 2382 } while (greset & GRSTCTL_RXFFLSH); 2383 2384 /* Wait for at least 3 PHY Clocks */ 2385 udelay(1); 2386 } 2387 2388 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) 2389 2390 /* Parameter access functions */ 2391 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) 2392 { 2393 int valid = 1; 2394 2395 switch (val) { 2396 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: 2397 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) 2398 valid = 0; 2399 break; 2400 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: 2401 switch (hsotg->hw_params.op_mode) { 2402 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2403 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2404 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2405 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2406 break; 2407 default: 2408 valid = 0; 2409 break; 2410 } 2411 break; 2412 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: 2413 /* always valid */ 2414 break; 2415 default: 2416 valid = 0; 2417 break; 2418 } 2419 2420 if (!valid) { 2421 if (val >= 0) 2422 dev_err(hsotg->dev, 2423 "%d invalid for otg_cap parameter. 
Check HW configuration.\n", 2424 val); 2425 switch (hsotg->hw_params.op_mode) { 2426 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2427 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 2428 break; 2429 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2430 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2431 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2432 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 2433 break; 2434 default: 2435 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 2436 break; 2437 } 2438 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); 2439 } 2440 2441 hsotg->core_params->otg_cap = val; 2442 } 2443 2444 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) 2445 { 2446 int valid = 1; 2447 2448 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) 2449 valid = 0; 2450 if (val < 0) 2451 valid = 0; 2452 2453 if (!valid) { 2454 if (val >= 0) 2455 dev_err(hsotg->dev, 2456 "%d invalid for dma_enable parameter. Check HW configuration.\n", 2457 val); 2458 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; 2459 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); 2460 } 2461 2462 hsotg->core_params->dma_enable = val; 2463 } 2464 2465 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) 2466 { 2467 int valid = 1; 2468 2469 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2470 !hsotg->hw_params.dma_desc_enable)) 2471 valid = 0; 2472 if (val < 0) 2473 valid = 0; 2474 2475 if (!valid) { 2476 if (val >= 0) 2477 dev_err(hsotg->dev, 2478 "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", 2479 val); 2480 val = (hsotg->core_params->dma_enable > 0 && 2481 hsotg->hw_params.dma_desc_enable); 2482 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); 2483 } 2484 2485 hsotg->core_params->dma_desc_enable = val; 2486 } 2487 2488 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, 2489 int val) 2490 { 2491 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2492 if (val >= 0) { 2493 dev_err(hsotg->dev, 2494 "Wrong value for host_support_fs_low_power\n"); 2495 dev_err(hsotg->dev, 2496 "host_support_fs_low_power must be 0 or 1\n"); 2497 } 2498 val = 0; 2499 dev_dbg(hsotg->dev, 2500 "Setting host_support_fs_low_power to %d\n", val); 2501 } 2502 2503 hsotg->core_params->host_support_fs_ls_low_power = val; 2504 } 2505 2506 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) 2507 { 2508 int valid = 1; 2509 2510 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) 2511 valid = 0; 2512 if (val < 0) 2513 valid = 0; 2514 2515 if (!valid) { 2516 if (val >= 0) 2517 dev_err(hsotg->dev, 2518 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n", 2519 val); 2520 val = hsotg->hw_params.enable_dynamic_fifo; 2521 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); 2522 } 2523 2524 hsotg->core_params->enable_dynamic_fifo = val; 2525 } 2526 2527 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2528 { 2529 int valid = 1; 2530 2531 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) 2532 valid = 0; 2533 2534 if (!valid) { 2535 if (val >= 0) 2536 dev_err(hsotg->dev, 2537 "%d invalid for host_rx_fifo_size. 
Check HW configuration.\n", 2538 val); 2539 val = hsotg->hw_params.host_rx_fifo_size; 2540 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); 2541 } 2542 2543 hsotg->core_params->host_rx_fifo_size = val; 2544 } 2545 2546 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2547 { 2548 int valid = 1; 2549 2550 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) 2551 valid = 0; 2552 2553 if (!valid) { 2554 if (val >= 0) 2555 dev_err(hsotg->dev, 2556 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", 2557 val); 2558 val = hsotg->hw_params.host_nperio_tx_fifo_size; 2559 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", 2560 val); 2561 } 2562 2563 hsotg->core_params->host_nperio_tx_fifo_size = val; 2564 } 2565 2566 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2567 { 2568 int valid = 1; 2569 2570 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) 2571 valid = 0; 2572 2573 if (!valid) { 2574 if (val >= 0) 2575 dev_err(hsotg->dev, 2576 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", 2577 val); 2578 val = hsotg->hw_params.host_perio_tx_fifo_size; 2579 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", 2580 val); 2581 } 2582 2583 hsotg->core_params->host_perio_tx_fifo_size = val; 2584 } 2585 2586 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) 2587 { 2588 int valid = 1; 2589 2590 if (val < 2047 || val > hsotg->hw_params.max_transfer_size) 2591 valid = 0; 2592 2593 if (!valid) { 2594 if (val >= 0) 2595 dev_err(hsotg->dev, 2596 "%d invalid for max_transfer_size. 
Check HW configuration.\n", 2597 val); 2598 val = hsotg->hw_params.max_transfer_size; 2599 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); 2600 } 2601 2602 hsotg->core_params->max_transfer_size = val; 2603 } 2604 2605 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) 2606 { 2607 int valid = 1; 2608 2609 if (val < 15 || val > hsotg->hw_params.max_packet_count) 2610 valid = 0; 2611 2612 if (!valid) { 2613 if (val >= 0) 2614 dev_err(hsotg->dev, 2615 "%d invalid for max_packet_count. Check HW configuration.\n", 2616 val); 2617 val = hsotg->hw_params.max_packet_count; 2618 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); 2619 } 2620 2621 hsotg->core_params->max_packet_count = val; 2622 } 2623 2624 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) 2625 { 2626 int valid = 1; 2627 2628 if (val < 1 || val > hsotg->hw_params.host_channels) 2629 valid = 0; 2630 2631 if (!valid) { 2632 if (val >= 0) 2633 dev_err(hsotg->dev, 2634 "%d invalid for host_channels. 
Check HW configuration.\n", 2635 val); 2636 val = hsotg->hw_params.host_channels; 2637 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); 2638 } 2639 2640 hsotg->core_params->host_channels = val; 2641 } 2642 2643 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) 2644 { 2645 int valid = 0; 2646 u32 hs_phy_type, fs_phy_type; 2647 2648 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, 2649 DWC2_PHY_TYPE_PARAM_ULPI)) { 2650 if (val >= 0) { 2651 dev_err(hsotg->dev, "Wrong value for phy_type\n"); 2652 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); 2653 } 2654 2655 valid = 0; 2656 } 2657 2658 hs_phy_type = hsotg->hw_params.hs_phy_type; 2659 fs_phy_type = hsotg->hw_params.fs_phy_type; 2660 if (val == DWC2_PHY_TYPE_PARAM_UTMI && 2661 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2662 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2663 valid = 1; 2664 else if (val == DWC2_PHY_TYPE_PARAM_ULPI && 2665 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || 2666 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2667 valid = 1; 2668 else if (val == DWC2_PHY_TYPE_PARAM_FS && 2669 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2670 valid = 1; 2671 2672 if (!valid) { 2673 if (val >= 0) 2674 dev_err(hsotg->dev, 2675 "%d invalid for phy_type. 
Check HW configuration.\n", 2676 val); 2677 val = DWC2_PHY_TYPE_PARAM_FS; 2678 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { 2679 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2680 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) 2681 val = DWC2_PHY_TYPE_PARAM_UTMI; 2682 else 2683 val = DWC2_PHY_TYPE_PARAM_ULPI; 2684 } 2685 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); 2686 } 2687 2688 hsotg->core_params->phy_type = val; 2689 } 2690 2691 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) 2692 { 2693 return hsotg->core_params->phy_type; 2694 } 2695 2696 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) 2697 { 2698 int valid = 1; 2699 2700 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2701 if (val >= 0) { 2702 dev_err(hsotg->dev, "Wrong value for speed parameter\n"); 2703 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n"); 2704 } 2705 valid = 0; 2706 } 2707 2708 if (val == DWC2_SPEED_PARAM_HIGH && 2709 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2710 valid = 0; 2711 2712 if (!valid) { 2713 if (val >= 0) 2714 dev_err(hsotg->dev, 2715 "%d invalid for speed parameter. Check HW configuration.\n", 2716 val); 2717 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? 
2718 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; 2719 dev_dbg(hsotg->dev, "Setting speed to %d\n", val); 2720 } 2721 2722 hsotg->core_params->speed = val; 2723 } 2724 2725 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val) 2726 { 2727 int valid = 1; 2728 2729 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, 2730 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { 2731 if (val >= 0) { 2732 dev_err(hsotg->dev, 2733 "Wrong value for host_ls_low_power_phy_clk parameter\n"); 2734 dev_err(hsotg->dev, 2735 "host_ls_low_power_phy_clk must be 0 or 1\n"); 2736 } 2737 valid = 0; 2738 } 2739 2740 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && 2741 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2742 valid = 0; 2743 2744 if (!valid) { 2745 if (val >= 0) 2746 dev_err(hsotg->dev, 2747 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", 2748 val); 2749 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS 2750 ? 
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 2751 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; 2752 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", 2753 val); 2754 } 2755 2756 hsotg->core_params->host_ls_low_power_phy_clk = val; 2757 } 2758 2759 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) 2760 { 2761 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2762 if (val >= 0) { 2763 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); 2764 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); 2765 } 2766 val = 0; 2767 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); 2768 } 2769 2770 hsotg->core_params->phy_ulpi_ddr = val; 2771 } 2772 2773 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) 2774 { 2775 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2776 if (val >= 0) { 2777 dev_err(hsotg->dev, 2778 "Wrong value for phy_ulpi_ext_vbus\n"); 2779 dev_err(hsotg->dev, 2780 "phy_ulpi_ext_vbus must be 0 or 1\n"); 2781 } 2782 val = 0; 2783 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); 2784 } 2785 2786 hsotg->core_params->phy_ulpi_ext_vbus = val; 2787 } 2788 2789 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) 2790 { 2791 int valid = 0; 2792 2793 switch (hsotg->hw_params.utmi_phy_data_width) { 2794 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: 2795 valid = (val == 8); 2796 break; 2797 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: 2798 valid = (val == 16); 2799 break; 2800 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: 2801 valid = (val == 8 || val == 16); 2802 break; 2803 } 2804 2805 if (!valid) { 2806 if (val >= 0) { 2807 dev_err(hsotg->dev, 2808 "%d invalid for phy_utmi_width. Check HW configuration.\n", 2809 val); 2810 } 2811 val = (hsotg->hw_params.utmi_phy_data_width == 2812 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 
8 : 16; 2813 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); 2814 } 2815 2816 hsotg->core_params->phy_utmi_width = val; 2817 } 2818 2819 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) 2820 { 2821 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2822 if (val >= 0) { 2823 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); 2824 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); 2825 } 2826 val = 0; 2827 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); 2828 } 2829 2830 hsotg->core_params->ulpi_fs_ls = val; 2831 } 2832 2833 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) 2834 { 2835 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2836 if (val >= 0) { 2837 dev_err(hsotg->dev, "Wrong value for ts_dline\n"); 2838 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); 2839 } 2840 val = 0; 2841 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); 2842 } 2843 2844 hsotg->core_params->ts_dline = val; 2845 } 2846 2847 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) 2848 { 2849 int valid = 1; 2850 2851 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2852 if (val >= 0) { 2853 dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); 2854 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); 2855 } 2856 2857 valid = 0; 2858 } 2859 2860 if (val == 1 && !(hsotg->hw_params.i2c_enable)) 2861 valid = 0; 2862 2863 if (!valid) { 2864 if (val >= 0) 2865 dev_err(hsotg->dev, 2866 "%d invalid for i2c_enable. 
Check HW configuration.\n", 2867 val); 2868 val = hsotg->hw_params.i2c_enable; 2869 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); 2870 } 2871 2872 hsotg->core_params->i2c_enable = val; 2873 } 2874 2875 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val) 2876 { 2877 int valid = 1; 2878 2879 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2880 if (val >= 0) { 2881 dev_err(hsotg->dev, 2882 "Wrong value for en_multiple_tx_fifo,\n"); 2883 dev_err(hsotg->dev, 2884 "en_multiple_tx_fifo must be 0 or 1\n"); 2885 } 2886 valid = 0; 2887 } 2888 2889 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) 2890 valid = 0; 2891 2892 if (!valid) { 2893 if (val >= 0) 2894 dev_err(hsotg->dev, 2895 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", 2896 val); 2897 val = hsotg->hw_params.en_multiple_tx_fifo; 2898 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); 2899 } 2900 2901 hsotg->core_params->en_multiple_tx_fifo = val; 2902 } 2903 2904 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) 2905 { 2906 int valid = 1; 2907 2908 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2909 if (val >= 0) { 2910 dev_err(hsotg->dev, 2911 "'%d' invalid for parameter reload_ctl\n", val); 2912 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); 2913 } 2914 valid = 0; 2915 } 2916 2917 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) 2918 valid = 0; 2919 2920 if (!valid) { 2921 if (val >= 0) 2922 dev_err(hsotg->dev, 2923 "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", 2924 val); 2925 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; 2926 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); 2927 } 2928 2929 hsotg->core_params->reload_ctl = val; 2930 } 2931 2932 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) 2933 { 2934 if (val != -1) 2935 hsotg->core_params->ahbcfg = val; 2936 else 2937 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << 2938 GAHBCFG_HBSTLEN_SHIFT; 2939 } 2940 2941 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) 2942 { 2943 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2944 if (val >= 0) { 2945 dev_err(hsotg->dev, 2946 "'%d' invalid for parameter otg_ver\n", val); 2947 dev_err(hsotg->dev, 2948 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); 2949 } 2950 val = 0; 2951 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); 2952 } 2953 2954 hsotg->core_params->otg_ver = val; 2955 } 2956 2957 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) 2958 { 2959 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2960 if (val >= 0) { 2961 dev_err(hsotg->dev, 2962 "'%d' invalid for parameter uframe_sched\n", 2963 val); 2964 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); 2965 } 2966 val = 1; 2967 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); 2968 } 2969 2970 hsotg->core_params->uframe_sched = val; 2971 } 2972 2973 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, 2974 int val) 2975 { 2976 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2977 if (val >= 0) { 2978 dev_err(hsotg->dev, 2979 "'%d' invalid for parameter external_id_pin_ctl\n", 2980 val); 2981 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); 2982 } 2983 val = 0; 2984 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); 2985 } 2986 2987 hsotg->core_params->external_id_pin_ctl = val; 2988 } 2989 2990 static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, 2991 int val) 2992 { 2993 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 
2994 if (val >= 0) { 2995 dev_err(hsotg->dev, 2996 "'%d' invalid for parameter hibernation\n", 2997 val); 2998 dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); 2999 } 3000 val = 0; 3001 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); 3002 } 3003 3004 hsotg->core_params->hibernation = val; 3005 } 3006 3007 /* 3008 * This function is called during module intialization to pass module parameters 3009 * for the DWC_otg core. 3010 */ 3011 void dwc2_set_parameters(struct dwc2_hsotg *hsotg, 3012 const struct dwc2_core_params *params) 3013 { 3014 dev_dbg(hsotg->dev, "%s()\n", __func__); 3015 3016 dwc2_set_param_otg_cap(hsotg, params->otg_cap); 3017 dwc2_set_param_dma_enable(hsotg, params->dma_enable); 3018 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); 3019 dwc2_set_param_host_support_fs_ls_low_power(hsotg, 3020 params->host_support_fs_ls_low_power); 3021 dwc2_set_param_enable_dynamic_fifo(hsotg, 3022 params->enable_dynamic_fifo); 3023 dwc2_set_param_host_rx_fifo_size(hsotg, 3024 params->host_rx_fifo_size); 3025 dwc2_set_param_host_nperio_tx_fifo_size(hsotg, 3026 params->host_nperio_tx_fifo_size); 3027 dwc2_set_param_host_perio_tx_fifo_size(hsotg, 3028 params->host_perio_tx_fifo_size); 3029 dwc2_set_param_max_transfer_size(hsotg, 3030 params->max_transfer_size); 3031 dwc2_set_param_max_packet_count(hsotg, 3032 params->max_packet_count); 3033 dwc2_set_param_host_channels(hsotg, params->host_channels); 3034 dwc2_set_param_phy_type(hsotg, params->phy_type); 3035 dwc2_set_param_speed(hsotg, params->speed); 3036 dwc2_set_param_host_ls_low_power_phy_clk(hsotg, 3037 params->host_ls_low_power_phy_clk); 3038 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); 3039 dwc2_set_param_phy_ulpi_ext_vbus(hsotg, 3040 params->phy_ulpi_ext_vbus); 3041 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); 3042 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); 3043 dwc2_set_param_ts_dline(hsotg, params->ts_dline); 3044 
dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); 3045 dwc2_set_param_en_multiple_tx_fifo(hsotg, 3046 params->en_multiple_tx_fifo); 3047 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); 3048 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); 3049 dwc2_set_param_otg_ver(hsotg, params->otg_ver); 3050 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); 3051 dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); 3052 dwc2_set_param_hibernation(hsotg, params->hibernation); 3053 } 3054 3055 /** 3056 * During device initialization, read various hardware configuration 3057 * registers and interpret the contents. 3058 */ 3059 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) 3060 { 3061 struct dwc2_hw_params *hw = &hsotg->hw_params; 3062 unsigned width; 3063 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; 3064 u32 hptxfsiz, grxfsiz, gnptxfsiz; 3065 u32 gusbcfg; 3066 3067 /* 3068 * Attempt to ensure this device is really a DWC_otg Controller. 3069 * Read and verify the GSNPSID register contents. The value should be 3070 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3", 3071 * as in "OTG version 2.xx" or "OTG version 3.xx". 
3072 */ 3073 hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID); 3074 if ((hw->snpsid & 0xfffff000) != 0x4f542000 && 3075 (hw->snpsid & 0xfffff000) != 0x4f543000) { 3076 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", 3077 hw->snpsid); 3078 return -ENODEV; 3079 } 3080 3081 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", 3082 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, 3083 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); 3084 3085 hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); 3086 hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); 3087 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); 3088 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); 3089 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); 3090 3091 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); 3092 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); 3093 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); 3094 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); 3095 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); 3096 3097 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */ 3098 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 3099 gusbcfg |= GUSBCFG_FORCEHOSTMODE; 3100 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 3101 usleep_range(100000, 150000); 3102 3103 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); 3104 hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); 3105 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 3106 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); 3107 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 3108 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 3109 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 3110 usleep_range(100000, 150000); 3111 3112 /* hwcfg2 */ 3113 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> 3114 GHWCFG2_OP_MODE_SHIFT; 3115 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> 3116 GHWCFG2_ARCHITECTURE_SHIFT; 3117 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); 3118 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> 3119 GHWCFG2_NUM_HOST_CHAN_SHIFT); 3120 
hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> 3121 GHWCFG2_HS_PHY_TYPE_SHIFT; 3122 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> 3123 GHWCFG2_FS_PHY_TYPE_SHIFT; 3124 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> 3125 GHWCFG2_NUM_DEV_EP_SHIFT; 3126 hw->nperio_tx_q_depth = 3127 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> 3128 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; 3129 hw->host_perio_tx_q_depth = 3130 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> 3131 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; 3132 hw->dev_token_q_depth = 3133 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> 3134 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; 3135 3136 /* hwcfg3 */ 3137 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> 3138 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; 3139 hw->max_transfer_size = (1 << (width + 11)) - 1; 3140 /* 3141 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates 3142 * coherent buffers with this size, and if it's too large we can 3143 * exhaust the coherent DMA pool. 
3144 */ 3145 if (hw->max_transfer_size > 65535) 3146 hw->max_transfer_size = 65535; 3147 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> 3148 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; 3149 hw->max_packet_count = (1 << (width + 4)) - 1; 3150 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); 3151 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> 3152 GHWCFG3_DFIFO_DEPTH_SHIFT; 3153 3154 /* hwcfg4 */ 3155 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); 3156 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> 3157 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; 3158 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); 3159 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); 3160 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 3161 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; 3162 3163 /* fifo sizes */ 3164 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 3165 GRXFSIZ_DEPTH_SHIFT; 3166 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3167 FIFOSIZE_DEPTH_SHIFT; 3168 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3169 FIFOSIZE_DEPTH_SHIFT; 3170 3171 dev_dbg(hsotg->dev, "Detected values from hardware:\n"); 3172 dev_dbg(hsotg->dev, " op_mode=%d\n", 3173 hw->op_mode); 3174 dev_dbg(hsotg->dev, " arch=%d\n", 3175 hw->arch); 3176 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", 3177 hw->dma_desc_enable); 3178 dev_dbg(hsotg->dev, " power_optimized=%d\n", 3179 hw->power_optimized); 3180 dev_dbg(hsotg->dev, " i2c_enable=%d\n", 3181 hw->i2c_enable); 3182 dev_dbg(hsotg->dev, " hs_phy_type=%d\n", 3183 hw->hs_phy_type); 3184 dev_dbg(hsotg->dev, " fs_phy_type=%d\n", 3185 hw->fs_phy_type); 3186 dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", 3187 hw->utmi_phy_data_width); 3188 dev_dbg(hsotg->dev, " num_dev_ep=%d\n", 3189 hw->num_dev_ep); 3190 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", 3191 hw->num_dev_perio_in_ep); 3192 dev_dbg(hsotg->dev, " host_channels=%d\n", 3193 hw->host_channels); 
3194 dev_dbg(hsotg->dev, " max_transfer_size=%d\n", 3195 hw->max_transfer_size); 3196 dev_dbg(hsotg->dev, " max_packet_count=%d\n", 3197 hw->max_packet_count); 3198 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", 3199 hw->nperio_tx_q_depth); 3200 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", 3201 hw->host_perio_tx_q_depth); 3202 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", 3203 hw->dev_token_q_depth); 3204 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", 3205 hw->enable_dynamic_fifo); 3206 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", 3207 hw->en_multiple_tx_fifo); 3208 dev_dbg(hsotg->dev, " total_fifo_size=%d\n", 3209 hw->total_fifo_size); 3210 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n", 3211 hw->host_rx_fifo_size); 3212 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n", 3213 hw->host_nperio_tx_fifo_size); 3214 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", 3215 hw->host_perio_tx_fifo_size); 3216 dev_dbg(hsotg->dev, "\n"); 3217 3218 return 0; 3219 } 3220 3221 /* 3222 * Sets all parameters to the given value. 3223 * 3224 * Assumes that the dwc2_core_params struct contains only integers. 3225 */ 3226 void dwc2_set_all_params(struct dwc2_core_params *params, int value) 3227 { 3228 int *p = (int *)params; 3229 size_t size = sizeof(*params) / sizeof(*p); 3230 int i; 3231 3232 for (i = 0; i < size; i++) 3233 p[i] = value; 3234 } 3235 3236 3237 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg) 3238 { 3239 return hsotg->core_params->otg_ver == 1 ? 
0x0200 : 0x0103; 3240 } 3241 3242 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) 3243 { 3244 if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff) 3245 return false; 3246 else 3247 return true; 3248 } 3249 3250 /** 3251 * dwc2_enable_global_interrupts() - Enables the controller's Global 3252 * Interrupt in the AHB Config register 3253 * 3254 * @hsotg: Programming view of DWC_otg controller 3255 */ 3256 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg) 3257 { 3258 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); 3259 3260 ahbcfg |= GAHBCFG_GLBL_INTR_EN; 3261 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); 3262 } 3263 3264 /** 3265 * dwc2_disable_global_interrupts() - Disables the controller's Global 3266 * Interrupt in the AHB Config register 3267 * 3268 * @hsotg: Programming view of DWC_otg controller 3269 */ 3270 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) 3271 { 3272 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); 3273 3274 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 3275 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); 3276 } 3277 3278 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core"); 3279 MODULE_AUTHOR("Synopsys, Inc."); 3280 MODULE_LICENSE("Dual BSD/GPL"); 3281