// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"

#define VENDOR_V_22	0x12
#define VENDOR_V_23	0x13

#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};

static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};

static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);

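/*
 * Illustrative reading of the fixup tables above (example values, not an
 * exhaustive description): esdhc_of_set_clock() looks up
 * clk_fixup->max_clk[] by the current timing mode and caps the requested
 * clock, so with ls1046a_esdhc_clk an SDR104/HS200 request is limited to
 * 167 MHz, while a plain SD card still in MMC_TIMING_LEGACY is held to
 * sd_dflt_max_clk (25 MHz). Timings with no entry (0) are left uncapped.
 */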
struct sdhci_esdhc {
	u8 vendor_ver;
	u8 spec_ver;
	bool quirk_incorrect_hostver;
	bool quirk_limited_clk_division;
	bool quirk_unreliable_pulse_detection;
	bool quirk_tuning_erratum_type1;
	bool quirk_tuning_erratum_type2;
	bool quirk_ignore_data_inhibit;
	bool quirk_delay_before_data_reset;
	bool in_sw_tuning;
	unsigned int peripheral_clock;
	const struct esdhc_clk_fixup *clk_fixup;
	u32 div_ratio;
};

/**
 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
 *		       to make it compatible with SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
			     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 ret;

	/*
	 * The ADMA flag bit in eSDHC is not compatible with the standard
	 * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
	 * is supported by eSDHC.
	 * On many FSL eSDHC controllers the reset value of the
	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them cannot support
	 * ADMA; only those whose vendor version is greater than 2.2/0x12
	 * support ADMA.
	 */
	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
		if (esdhc->vendor_ver > VENDOR_V_22) {
			ret = value | SDHCI_CAN_DO_ADMA2;
			return ret;
		}
	}
	/*
	 * The DAT[3:0] line signal levels and the CMD line signal level are
	 * not compatible with the standard SDHC register. The line signal
	 * levels DAT[7:0] are at bits 31:24 and the command line signal
	 * level is at bit 23. All other bits are the same as in the
	 * standard SDHC register.
	 */
	if (spec_reg == SDHCI_PRESENT_STATE) {
		ret = value & 0x000fffff;
		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
		ret |= (value << 1) & SDHCI_CMD_LVL;
		return ret;
	}

	/*
	 * DTS properties of the mmc host are used to enable each speed mode
	 * according to SoC and board capability. So clean up the
	 * SDR50/SDR104/DDR50 support bits here.
	 */
	if (spec_reg == SDHCI_CAPABILITIES_1) {
		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
				SDHCI_SUPPORT_DDR50);
		return ret;
	}

	/*
	 * Some controllers have an unreliable Data Line Active
	 * bit for commands with busy signal. This affects the
	 * Command Inhibit (data) bit. Just ignore it since the
	 * MMC core driver has already polled card status
	 * with CMD13 after any command with busy signal.
	 */
	if ((spec_reg == SDHCI_PRESENT_STATE) &&
	    (esdhc->quirk_ignore_data_inhibit == true)) {
		ret = value & ~SDHCI_DATA_INHIBIT;
		return ret;
	}

	ret = value;
	return ret;
}

static u16 esdhc_readw_fixup(struct sdhci_host *host,
			     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u16 ret;
	int shift = (spec_reg & 0x2) * 8;

	if (spec_reg == SDHCI_TRANSFER_MODE)
		return pltfm_host->xfer_mode_shadow;

	if (spec_reg == SDHCI_HOST_VERSION)
		ret = value & 0xffff;
	else
		ret = (value >> shift) & 0xffff;
	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
	 * vendor version and spec version information.
	 */
	if ((spec_reg == SDHCI_HOST_VERSION) &&
	    (esdhc->quirk_incorrect_hostver))
		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
	return ret;
}

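/*
 * A small worked example of the halfword extraction above, assuming the
 * standard SDHCI register layout: a 16-bit read of SDHCI_BLOCK_COUNT
 * (offset 0x06) reaches esdhc_readw_fixup() as a 32-bit read of the word
 * at offset 0x04 (see esdhc_be_readw()/esdhc_le_readw() below);
 * shift = (0x06 & 0x2) * 8 = 16, so the block count is taken from
 * bits 31:16 of that word.
 */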
static u8 esdhc_readb_fixup(struct sdhci_host *host,
			    int spec_reg, u32 value)
{
	u8 ret;
	u8 dma_bits;
	int shift = (spec_reg & 0x3) * 8;

	ret = (value >> shift) & 0xff;

	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
		/* fixup the result */
		ret &= ~SDHCI_CTRL_DMA_MASK;
		ret |= dma_bits;
	}
	return ret;
}

/**
 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
 *			written into eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32bit SD spec register value that would be written
 * @old_value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
			      int spec_reg, u32 value, u32 old_value)
{
	u32 ret;

	/*
	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
	 * when SYSCTL[RSTD] is set for some special operations.
	 * It has no impact on other operations.
	 */
	if (spec_reg == SDHCI_INT_ENABLE)
		ret = value | SDHCI_INT_BLK_GAP;
	else
		ret = value;

	return ret;
}

static u32 esdhc_writew_fixup(struct sdhci_host *host,
			      int spec_reg, u16 value, u32 old_value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int shift = (spec_reg & 0x2) * 8;
	u32 ret;

	switch (spec_reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below. Return old value.
		 */
		pltfm_host->xfer_mode_shadow = value;
		return old_value;
	case SDHCI_COMMAND:
		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
		return ret;
	}

	ret = old_value & (~(0xffff << shift));
	ret |= (value << shift);

	if (spec_reg == SDHCI_BLOCK_SIZE) {
		/*
		 * The last two DMA boundary bits are reserved, and the first
		 * one is used for a non-standard blksz of 4096 bytes that we
		 * don't support yet. So clear the DMA boundary bits.
		 */
		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
	}
	return ret;
}

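/*
 * Note on the SDHCI_TRANSFER_MODE/SDHCI_COMMAND handling above: both
 * 16-bit registers share one 32-bit eSDHC word, so a write to
 * SDHCI_TRANSFER_MODE is only latched in xfer_mode_shadow and the later
 * write to SDHCI_COMMAND produces the combined value
 * (command << 16) | xfer_mode_shadow, which the esdhc_*_writew()
 * accessors below then store with a single 32-bit access.
 */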
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
			      int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If the host control register is not standard, exit
		 * this function.
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}

static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32be(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32be(value, host->ioaddr + reg);
}

static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32(value, host->ioaddr + reg);
}

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1 us after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1 us after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32(ret, host->ioaddr + base);
}

/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD])
 * and re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		 SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}

static int esdhc_of_enable_dma(struct sdhci_host *host)
{
	u32 value;
	struct device *dev = mmc_dev(host->mmc);

	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));

	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

	if (of_dma_is_coherent(dev->of_node))
		value |= ESDHC_DMA_SNOOP;
	else
		value &= ~ESDHC_DMA_SNOOP;

	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
	return 0;
}

static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);

	if (esdhc->peripheral_clock)
		return esdhc->peripheral_clock;
	else
		return pltfm_host->clock;
}

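/*
 * A short worked example (base clock value chosen only for illustration):
 * the largest division below is a prescaler of 256 combined with a
 * divisor of 16, i.e. base / 4096, so a 400 MHz base clock yields a
 * minimum card clock of roughly 97.6 kHz.
 */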
static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int clock;

	if (esdhc->peripheral_clock)
		clock = esdhc->peripheral_clock;
	else
		clock = pltfm_host->clock;
	return clock / 256 / 16;
}

static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	ktime_t timeout;
	u32 val, clk_en;

	clk_en = ESDHC_CLOCK_SDCLKEN;

	/*
	 * The IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
	 * is 2.2 or lower.
	 */
	if (esdhc->vendor_ver <= VENDOR_V_22)
		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
			   ESDHC_CLOCK_PEREN);

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= clk_en;
	else
		val &= ~clk_en;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}

static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
		      ESDHC_FLUSH_ASYNC_FIFO))
			break;
		if (timedout) {
			pr_err("%s: flushing asynchronous FIFO timed out.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}

static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int pre_div = 1, div = 1;
	unsigned int clock_fixup = 0;
	ktime_t timeout;
	u32 temp;

	if (clock == 0) {
		host->mmc->actual_clock = 0;
		esdhc_clock_enable(host, false);
		return;
	}

	/* Start pre_div at 2 for vendor version < 2.3. */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Fix the clock value. */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (clock_fixup == 0 || clock < clock_fixup)
		clock_fixup = clock;

	/* Calculate pre_div and div. */
	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
		div++;

	esdhc->div_ratio = pre_div * div;

	/* Limit the clock division for the HS400 200 MHz clock for the quirk. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		if (esdhc->div_ratio <= 4) {
			pre_div = 4;
			div = 1;
		} else if (esdhc->div_ratio <= 8) {
			pre_div = 4;
			div = 2;
		} else if (esdhc->div_ratio <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
		esdhc->div_ratio = pre_div * div;
	}

	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

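	/*
	 * Worked example with illustrative numbers: for a 160 MHz base
	 * clock and a 25 MHz target on a controller with vendor version
	 * >= 2.3, the loops above leave pre_div at 1 (160 MHz / 16 is
	 * already below the target) and stop at div = 7, giving an
	 * actual clock of 160 MHz / 7, about 22.9 MHz.
	 */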
	/* Set the clock division into the register. */
	pre_div >>= 1;
	div--;

	esdhc_clock_enable(host, false);

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~ESDHC_CLOCK_MASK;
	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
		 (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}

	/* Additional settings for HS400. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		esdhc_clock_enable(host, false);
		esdhc_flush_async_fifo(host);
	}
	esdhc_clock_enable(host, true);
}

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl;

	ctrl = sdhci_readl(host, ESDHC_PROCTL);
	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl |= ESDHC_CTRL_8BITBUS;
		break;

	case MMC_BUS_WIDTH_4:
		ctrl |= ESDHC_CTRL_4BITBUS;
		break;

	default:
		break;
	}

	sdhci_writel(host, ctrl, ESDHC_PROCTL);
}

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add a delay to make sure all the DMA transfers are finished
	 * for the quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save the bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore the bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleared manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for the quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}

/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is an
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR	0x408
#define SDHCIOVSELCR_TGLEN	0x80000000
#define SDHCIOVSELCR_VSELVAL	0x60000000
#define SDHCIOVSELCR_SDHC_VS	0x00000001

static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		if (scfg_base) {
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			mdelay(5);

			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);
			iounmap(scfg_base);
		} else {
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}

static struct soc_device_attribute soc_tuning_erratum_type1[] = {
	{ .family = "QorIQ T1023", },
	{ .family = "QorIQ T1040", },
	{ .family = "QorIQ T2080", },
	{ .family = "QorIQ LS1021A", },
	{ },
};

static struct soc_device_attribute soc_tuning_erratum_type2[] = {
	{ .family = "QorIQ LS1012A", },
	{ .family = "QorIQ LS1043A", },
	{ .family = "QorIQ LS1046A", },
	{ .family = "QorIQ LS1080A", },
	{ .family = "QorIQ LS2080A", },
	{ .family = "QorIQ LA1575A", },
	{ },
};

static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
	u32 val;

	esdhc_clock_enable(host, false);
	esdhc_flush_async_fifo(host);

	val = sdhci_readl(host, ESDHC_TBCTL);
	if (enable)
		val |= ESDHC_TB_EN;
	else
		val &= ~ESDHC_TB_EN;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc_clock_enable(host, true);
}

static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	u32 val;

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	*window_end = val & 0xff;
	*window_start = (val >> 8) & 0xff;
}

static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 start_ptr, end_ptr;

	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */
	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}

static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;
	int ret;

	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
	      ESDHC_WNDW_STRT_PTR_MASK;
	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
	sdhci_writel(host, val, ESDHC_TBPTR);

	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~ESDHC_TB_MODE_MASK;
	val |= ESDHC_TB_MODE_SW;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc->in_sw_tuning = true;
	ret = sdhci_execute_tuning(mmc, opcode);
	esdhc->in_sw_tuning = false;
	return ret;
}

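/*
 * Rough outline of the tuning flow below: hardware tuning (TB_MODE_3) is
 * tried first; on platforms with the type-1/type-2 tuning errata a failed
 * or suspicious result (tuning_err == -EAGAIN) falls back to software
 * tuning with the window computed above, and if that still fails one more
 * pass of both is attempted at a reduced clock before giving up and
 * disabling the tuning block.
 */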
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/* For tuning mode, the SD clock divisor value
	 * must be larger than 3 according to the reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/* For platforms affected by the type-2 tuning erratum,
		 * tuning may succeed although eSDHC might not have
		 * tuned properly. Need to check the tuning window.
		 */
		if (esdhc->quirk_tuning_erratum_type2 &&
		    !host->tuning_err) {
			esdhc_tuning_window_ptr(host, &window_start,
						&window_end);
			if (abs(window_start - window_end) >
			    (4 * esdhc->div_ratio + 2))
				host->tuning_err = -EAGAIN;
		}

		/* If HW tuning fails and triggers the erratum,
		 * try the workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}

static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				    unsigned int timing)
{
	u32 val;

	/*
	 * There are specific register settings for HS400 mode.
	 * Clear all of them if the controller is in HS400 mode to
	 * exit HS400 mode before re-setting any speed mode.
	 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	if (val & ESDHC_HS400_MODE) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val &= ~ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

		val = sdhci_readl(host, ESDHC_SDCLKCTL);
		val &= ~ESDHC_CMD_CLK_CTL;
		sdhci_writel(host, val, ESDHC_SDCLKCTL);

		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_MODE;
		sdhci_writel(host, val, ESDHC_TBCTL);
		esdhc_clock_enable(host, true);

		val = sdhci_readl(host, ESDHC_DLLCFG0);
		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
		sdhci_writel(host, val, ESDHC_DLLCFG0);

		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_WNDW_ADJUST;
		sdhci_writel(host, val, ESDHC_TBCTL);

		esdhc_tuning_block_enable(host, false);
	}

	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}

static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	if (of_find_compatible_node(NULL, NULL,
				    "fsl,p2020-esdhc")) {
		command = SDHCI_GET_CMD(sdhci_readw(host,
						    SDHCI_COMMAND));
		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
		    sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
		    intmask & SDHCI_INT_DATA_END) {
			intmask &= ~SDHCI_INT_DATA_END;
			sdhci_writel(host, SDHCI_INT_DATA_END,
				     SDHCI_INT_STATUS);
		}
	}
	return intmask;
}

#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			 esdhc_of_suspend,
			 esdhc_of_resume);

static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};

static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ },
};

static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			    SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
		esdhc->quirk_delay_before_data_reset = true;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock is assigned the eSDHC base clock
		 * when the peripheral clock is used. On some platforms the
		 * rate reported by the common clk API is the peripheral
		 * clock, while the eSDHC base clock is 1/2 of that
		 * peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	if (esdhc->peripheral_clock) {
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		val |= ESDHC_PERIPHERAL_CLK_SEL;
		sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
		esdhc_clock_enable(host, true);
	}
}

static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
	esdhc_tuning_block_enable(mmc_priv(mmc), false);
	return 0;
}

static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_tuning_erratum_type1))
		esdhc->quirk_tuning_erratum_type1 = true;
	else
		esdhc->quirk_tuning_erratum_type1 = false;

	if (soc_device_match(soc_tuning_erratum_type2))
		esdhc->quirk_tuning_erratum_type2 = true;
	else
		esdhc->quirk_tuning_erratum_type2 = false;

	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(np, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;
err:
	sdhci_pltfm_free(pdev);
	return ret;
}

static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");