// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)

#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)

#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8

#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200

#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)

#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)

#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
#define TRIES_128					2
#define TRIES_256					4
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7

#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
#define TUNING_WORD_BIT_SIZE				32

#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
#define SDHCI_AUTO_CAL_START				BIT(31)
#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff

#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000

#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)

#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
#define NVQUIRK_ENABLE_SDR50				BIT(3)
#define NVQUIRK_ENABLE_SDR104				BIT(4)
#define NVQUIRK_ENABLE_DDR50				BIT(5)
/*
 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
 * drive strength.
 */
#define NVQUIRK_HAS_PADCALIB				BIT(6)
/*
 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 * on the signaling mode.
 */
#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)

/*
 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 * SDMMC hardware data timeout.
 */
#define NVQUIRK_HAS_TMCLK				BIT(10)

/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000

#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
					 SDHCI_TRNS_BLK_CNT_EN | \
					 SDHCI_TRNS_DMA)

struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;
	u64 dma_mask;
	u32 nvquirks;
	u8 min_tap_delay;
	u8 max_tap_delay;
};

/* Magic pull up and pull down pad calibration offsets */
struct sdhci_tegra_autocal_offsets {
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};

struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	struct clk *tmclk;
	bool ddr_signaling;
	bool pad_calib_required;
	bool pad_control_available;

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;

	u32 default_tap;
	u32 default_trim;
	u32 dqs_trim;
	bool enable_hwcq;
	unsigned long curr_clk_rate;
	u8 tuned_tap_delay;
};

static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
			(reg == SDHCI_HOST_VERSION))) {
		/* Erratum: Version register is invalid in HW. */
		return SDHCI_SPEC_200;
	}

	return readw(host->ioaddr + reg);
}

static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	switch (reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		pltfm_host->xfer_mode_shadow = val;
		return;
	case SDHCI_COMMAND:
		writel((val << 16) | pltfm_host->xfer_mode_shadow,
		       host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}

static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	/* Seems like we're getting spurious timeout and crc errors, so
	 * disable signalling of them. In case of real errors software
	 * timers should take care of eventually detecting them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
			(reg == SDHCI_INT_ENABLE))) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);

		if (val & SDHCI_INT_CARD_INT)
			gap_ctrl |= 0x8;
		else
			gap_ctrl &= ~0x8;
		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}

static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
{
	bool status;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	status = !!(reg & SDHCI_CLOCK_CARD_EN);

	if (status == enable)
		return status;

	if (enable)
		reg |= SDHCI_CLOCK_CARD_EN;
	else
		reg &= ~SDHCI_CLOCK_CARD_EN;

	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);

	return status;
}

static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	bool is_tuning_cmd = false;
	bool clk_enabled;
	u8 cmd;

	if (reg == SDHCI_COMMAND) {
		cmd = SDHCI_GET_CMD(val);
		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
				cmd == MMC_SEND_TUNING_BLOCK_HS200;
	}

	if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	writew(val, host->ioaddr + reg);

	if (is_tuning_cmd) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, clk_enabled);
	}
}

static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}

static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int has_1v8, has_3v3;

	/*
	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
	 * voltage configuration in order to perform voltage switching. This
	 * means that valid pinctrl info is required on SDHCI instances capable
	 * of performing voltage switching. Whether or not an SDHCI instance is
	 * capable of voltage switching is determined based on the regulator.
	 */

	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return true;

	if (IS_ERR(host->mmc->supply.vqmmc))
		return false;

	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 1700000, 1950000);

	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 2700000, 3600000);

	if (has_1v8 == 1 && has_3v3 == 1)
		return tegra_host->pad_control_available;

	/* Fixed voltage, no pad control required. */
	return true;
}

static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	bool card_clk_enabled = false;
	u32 reg;

	/*
	 * Touching the tap values is a bit tricky on some SoC generations.
	 * The quirk enables a workaround for a glitch that sometimes occurs if
	 * the tap values are changed.
	 */

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
	    card_clk_enabled) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
	}
}

static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe)
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	else
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}

static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_reset(host, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}

static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
	u32 val;

	/*
	 * Enable or disable the additional I/O pad used by the drive strength
	 * calibration process.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
	else
		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;

	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		usleep_range(1, 2);
}

static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
					       u16 pdpu)
{
	u32 reg;

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
	reg |= pdpu;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}

static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
			&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			reg = sdhci_readl(host,
					  SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
				     SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}

static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic cal and use fixed Drive Strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}

static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *autocal =
			&tegra_host->autocal_offsets;
	int err;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-3v3",
			&autocal->pull_up_3v3);
	if (err)
		autocal->pull_up_3v3 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-3v3",
			&autocal->pull_down_3v3);
	if (err)
		autocal->pull_down_3v3 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-1v8",
			&autocal->pull_up_1v8);
	if (err)
		autocal->pull_up_1v8 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-1v8",
			&autocal->pull_down_1v8);
	if (err)
		autocal->pull_down_1v8 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-sdr104",
			&autocal->pull_up_sdr104);
	if (err)
		autocal->pull_up_sdr104 = autocal->pull_up_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-sdr104",
			&autocal->pull_down_sdr104);
	if (err)
		autocal->pull_down_sdr104 = autocal->pull_down_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-hs400",
			&autocal->pull_up_hs400);
	if (err)
		autocal->pull_up_hs400 = autocal->pull_up_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-hs400",
			&autocal->pull_down_hs400);
	if (err)
		autocal->pull_down_hs400 = autocal->pull_down_1v8;

	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
	 * So, avoid reading below device tree properties for SoCs that don't
	 * have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			&autocal->pull_up_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_3v3_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
			&autocal->pull_down_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_3v3_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
			&autocal->pull_up_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_1v8_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
			&autocal->pull_down_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_1v8_timeout = 0;
	}
}

static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);

	/* 100 ms calibration interval is specified in the TRM */
	if (ktime_to_ms(since_calib) > 100) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->last_calib = ktime_get();
	}

	sdhci_request(mmc, mrq);
}

static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int err;

	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
				       &tegra_host->default_tap);
	if (err)
		tegra_host->default_tap = 0;

	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
				       &tegra_host->default_trim);
	if (err)
		tegra_host->default_trim = 0;

	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
				       &tegra_host->dqs_trim);
	if (err)
		tegra_host->dqs_trim = 0x11;
}

static void tegra_sdhci_parse_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
		tegra_host->enable_hwcq = true;
	else
		tegra_host->enable_hwcq = false;

	tegra_sdhci_parse_pad_autocal_dt(host);
	tegra_sdhci_parse_tap_and_trim(host);
}

static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divide the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	tegra_host->curr_clk_rate = host_clk;
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}

static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_round_rate(pltfm_host->clk, UINT_MAX);
}

static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}

static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
	u32 reg;
	int err;

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);

	/* 1 ms sleep, 5 ms timeout */
	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
				 1000, 5000);
	if (err)
		dev_err(mmc_dev(host->mmc),
			"HS400 delay line calibration timed out\n");
}

static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract good valid passing window by
	 * filtering out unwanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}

static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use in case no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and Lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when HW tuning result contains single edge
		 * and tap is set at fixed tap delay relative to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * partial window includes edges of the tuning range.
		 * merged window includes more taps so window width is higher
		 * than upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}

static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	err = sdhci_execute_tuning(mmc, opcode);
	if (!err && !host->tuning_err)
		tegra_sdhci_post_tuning(host);

	return err;
}

static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes. */
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_HS400:
		set_dqs_trim = true;
		do_hs400_dll_cal = true;
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		tegra_host->ddr_signaling = true;
		set_default_tap = true;
		break;
	default:
		set_default_tap = true;
		break;
	}

	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);

	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;

	sdhci_set_uhs_signaling(host, timing);

	tegra_sdhci_pad_autocalib(host);

	if (tegra_host->tuned_tap_delay && !set_default_tap)
		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
	else
		tegra_sdhci_set_tap(host, tegra_host->default_tap);

	if (set_dqs_trim)
		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);

	if (do_hs400_dll_cal)
		tegra_sdhci_hs400_dll_cal(host);
}

static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned int min, max;

	/*
	 * Start search for minimum tap value at 10, as smaller values may
	 * wrongly be reported as working but fail at higher speeds,
	 * according to the TRM.
	 */
	min = 10;
	while (min < 255) {
		tegra_sdhci_set_tap(host, min);
		if (!mmc_send_tuning(host->mmc, opcode, NULL))
			break;
		min++;
	}

	/* Find the maximum tap value that still passes. */
	max = min + 1;
	while (max < 255) {
		tegra_sdhci_set_tap(host, max);
		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			max--;
			break;
		}
		max++;
	}

	/* The TRM states the ideal tap value is at 75% in the passing range. */
	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));

	return mmc_send_tuning(host->mmc, opcode, NULL);
}

static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int ret = 0;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
		if (ret < 0)
			return ret;
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
		if (ret < 0)
			return ret;
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
	}

	if (tegra_host->pad_calib_required)
		tegra_sdhci_pad_autocalib(host);

	return ret;
}

static int tegra_sdhci_init_pinctrl_info(struct device *dev,
					 struct sdhci_tegra *tegra_host)
{
	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
		dev_dbg(dev, "No pinctrl info, err: %ld\n",
			PTR_ERR(tegra_host->pinctrl_sdmmc));
		return -1;
	}

	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
			tegra_host->pinctrl_state_1v8_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
			tegra_host->pinctrl_state_3v3_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_3v3));
		return -1;
	}

	tegra_host->pinctrl_state_1v8 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_1v8));
		return -1;
	}

	tegra_host->pad_control_available = true;

	return 0;
}

static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		tegra_host->pad_calib_required = true;
}

static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
	 * to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to block size register when
	 * CQE is unhalted. So handle the CQE resume sequence here to configure
	 * SDHCI block registers prior to exiting CQE halt state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quickly, but in case Tegra CQE
		 * doesn't resume, retry the unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}

static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
					 struct mmc_request *mrq, u64 *data)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
	    mrq->cmd->flags & MMC_RSP_R1B)
		*data |= CQHCI_CMD_TIMING(1);
}

static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	/*
	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
	 * register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * The recommended fix is to set CBC to 0, allowing the send status
	 * command only when the data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}

static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	sdhci_dumpregs(mmc_priv(mmc));
}

static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

static void tegra_sdhci_set_timeout(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u32 val;

	/*
	 * HW busy detection timeout is based on programmed data timeout
	 * counter and maximum supported timeout is 11s which may not be
	 * enough for long operations like cache flush, sleep awake, erase.
	 *
	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
	 * host controller to wait for busy state until the card is busy
	 * without HW timeout.
	 *
	 * So, use infinite busy wait mode for operations that may take
	 * more than maximum HW busy timeout of 11s otherwise use finite
	 * busy wait mode.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	else
		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);

	__sdhci_set_timeout(host, cmd);
}

static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
}

static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
}

static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l = tegra_cqhci_writel,
	.enable = sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
	.pre_enable = sdhci_tegra_cqe_pre_enable,
	.post_disable = sdhci_tegra_cqe_post_disable,
};

static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *platform = sdhci_priv(host);
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
	struct device *dev = mmc_dev(host->mmc);

	if (soc->dma_mask)
		return dma_set_mask_and_coherent(dev, soc->dma_mask);

	return 0;
}

static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};

static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
		    * though no command operation was in progress."
		    *
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_PADCALIB,
};

static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
};

static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra210_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra210_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};

static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra186_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};

static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};

static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);

static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc),
			       sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
	if (!match)
		return -EINVAL;
	soc_data = match->data;

	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
	if (IS_ERR(host))
		return PTR_ERR(host);
	pltfm_host = sdhci_priv(host);

	tegra_host = sdhci_pltfm_priv(pltfm_host);
	tegra_host->ddr_signaling = false;
	tegra_host->pad_calib_required = false;
	tegra_host->pad_control_available = false;
	tegra_host->soc_data = soc_data;

	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
		if (rc == 0)
			host->mmc_host_ops.start_signal_voltage_switch =
				sdhci_tegra_start_signal_voltage_switch;
	}

	/* Hook to periodically rerun pad calibration */
	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		host->mmc_host_ops.request = tegra_sdhci_request;

	host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
				tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* HW busy detection is supported, but R1B responses are required. */
	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	tegra_sdhci_parse_dt(host);

	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(tegra_host->power_gpio)) {
		rc = PTR_ERR(tegra_host->power_gpio);
		goto err_power_req;
	}

	/*
	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
	 *
	 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
	 * 12Mhz TMCLK which is advertised in host capability register.
	 * A TMCLK of 12Mhz provides a maximum data timeout period of 11s,
	 * which is better than using SDCLK for data timeout.
	 *
	 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
	 * supporting separate TMCLK.
	 */

	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
		clk = devm_clk_get(&pdev->dev, "tmclk");
		if (IS_ERR(clk)) {
			rc = PTR_ERR(clk);
			if (rc == -EPROBE_DEFER)
				goto err_power_req;

			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
			clk = NULL;
		}

		clk_set_rate(clk, 12000000);
		rc = clk_prepare_enable(clk);
		if (rc) {
			dev_err(&pdev->dev,
				"failed to enable tmclk: %d\n", rc);
			goto err_power_req;
		}

		tegra_host->tmclk = clk;
	}

	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
				   "failed to get clock\n");
		goto err_clk_get;
	}
	clk_prepare_enable(clk);
	pltfm_host->clk = clk;

	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
							   "sdhci");
	if (IS_ERR(tegra_host->rst)) {
		rc = PTR_ERR(tegra_host->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
		goto err_rst_get;
	}

	rc = reset_control_assert(tegra_host->rst);
	if (rc)
		goto err_rst_get;

	usleep_range(2000, 4000);

	rc = reset_control_deassert(tegra_host->rst);
	if (rc)
		goto err_rst_get;

	usleep_range(2000, 4000);

	rc = sdhci_tegra_add_host(host);
	if (rc)
		goto err_add_host;

	return 0;

err_add_host:
	reset_control_assert(tegra_host->rst);
err_rst_get:
	clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
	clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
	sdhci_pltfm_free(pdev);
	return rc;
}

static int sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);
	clk_disable_unprepare(pltfm_host->clk);
	clk_disable_unprepare(tegra_host->tmclk);

	sdhci_pltfm_free(pdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}

static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver		= {
		.name	= "sdhci-tegra",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_tegra_dt_match,
		.pm	= &sdhci_tegra_dev_pm_ops,
	},
	.probe		= sdhci_tegra_probe,
	.remove		= sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");