1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2010 Google, Inc. 4 */ 5 6 #include <linux/delay.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/err.h> 9 #include <linux/module.h> 10 #include <linux/init.h> 11 #include <linux/iopoll.h> 12 #include <linux/platform_device.h> 13 #include <linux/clk.h> 14 #include <linux/io.h> 15 #include <linux/of.h> 16 #include <linux/of_device.h> 17 #include <linux/pinctrl/consumer.h> 18 #include <linux/regulator/consumer.h> 19 #include <linux/reset.h> 20 #include <linux/mmc/card.h> 21 #include <linux/mmc/host.h> 22 #include <linux/mmc/mmc.h> 23 #include <linux/mmc/slot-gpio.h> 24 #include <linux/gpio/consumer.h> 25 #include <linux/ktime.h> 26 27 #include "sdhci-pltfm.h" 28 #include "cqhci.h" 29 30 /* Tegra SDHOST controller vendor register definitions */ 31 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100 32 #define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000 33 #define SDHCI_CLOCK_CTRL_TAP_SHIFT 16 34 #define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000 35 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24 36 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5) 37 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3) 38 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2) 39 40 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104 41 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31) 42 43 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c 44 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00 45 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8 46 47 #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 48 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0) 49 #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 50 #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 51 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 52 #define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200 53 54 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0 55 #define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31) 56 57 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc 58 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31) 59 60 #define SDHCI_VNDR_TUN_CTRL0_0 0x1c0 61 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000 62 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000 63 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18 64 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0 65 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6 66 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000 67 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13 68 #define TRIES_128 2 69 #define TRIES_256 4 70 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7 71 72 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4 73 #define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8 74 #define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC 75 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF 76 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8 77 #define TUNING_WORD_BIT_SIZE 32 78 79 #define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4 80 #define SDHCI_AUTO_CAL_START BIT(31) 81 #define SDHCI_AUTO_CAL_ENABLE BIT(29) 82 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff 83 84 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0 85 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f 86 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7 87 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31) 88 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000 89 90 #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec 91 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31) 92 93 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) 94 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) 95 #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) 96 #define NVQUIRK_ENABLE_SDR50 BIT(3) 97 #define NVQUIRK_ENABLE_SDR104 BIT(4) 98 #define NVQUIRK_ENABLE_DDR50 BIT(5) 99 /* 100 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads 101 * drive strength. 102 */ 103 #define NVQUIRK_HAS_PADCALIB BIT(6) 104 /* 105 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads. 106 * 3V3/1V8 pad selection happens through pinctrl state selection depending 107 * on the signaling mode. 108 */ 109 #define NVQUIRK_NEEDS_PAD_CONTROL BIT(7) 110 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) 111 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9) 112 113 /* 114 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra 115 * SDMMC hardware data timeout. 116 */ 117 #define NVQUIRK_HAS_TMCLK BIT(10) 118 119 #define NVQUIRK_HAS_ANDROID_GPT_SECTOR BIT(11) 120 121 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ 122 #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 123 124 #define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \ 125 SDHCI_TRNS_BLK_CNT_EN | \ 126 SDHCI_TRNS_DMA) 127 128 struct sdhci_tegra_soc_data { 129 const struct sdhci_pltfm_data *pdata; 130 u64 dma_mask; 131 u32 nvquirks; 132 u8 min_tap_delay; 133 u8 max_tap_delay; 134 }; 135 136 /* Magic pull up and pull down pad calibration offsets */ 137 struct sdhci_tegra_autocal_offsets { 138 u32 pull_up_3v3; 139 u32 pull_down_3v3; 140 u32 pull_up_3v3_timeout; 141 u32 pull_down_3v3_timeout; 142 u32 pull_up_1v8; 143 u32 pull_down_1v8; 144 u32 pull_up_1v8_timeout; 145 u32 pull_down_1v8_timeout; 146 u32 pull_up_sdr104; 147 u32 pull_down_sdr104; 148 u32 pull_up_hs400; 149 u32 pull_down_hs400; 150 }; 151 152 struct sdhci_tegra { 153 const struct sdhci_tegra_soc_data *soc_data; 154 struct gpio_desc *power_gpio; 155 struct clk *tmclk; 156 bool ddr_signaling; 157 bool pad_calib_required; 158 bool pad_control_available; 159 160 struct reset_control *rst; 161 struct pinctrl *pinctrl_sdmmc; 162 struct pinctrl_state *pinctrl_state_3v3; 163 struct pinctrl_state *pinctrl_state_1v8; 164 struct pinctrl_state *pinctrl_state_3v3_drv; 165 struct pinctrl_state *pinctrl_state_1v8_drv; 166 167 struct sdhci_tegra_autocal_offsets autocal_offsets; 168 ktime_t last_calib; 169 170 u32 default_tap; 171 u32 default_trim; 172 u32 dqs_trim; 173 bool enable_hwcq; 174 unsigned long curr_clk_rate; 175 u8 tuned_tap_delay; 176 }; 177 178 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) 179 { 180 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 181 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 182 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 183 184 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) && 185 (reg == SDHCI_HOST_VERSION))) { 186 /* Erratum: Version register is invalid in HW. */ 187 return SDHCI_SPEC_200; 188 } 189 190 return readw(host->ioaddr + reg); 191 } 192 193 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg) 194 { 195 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 196 197 switch (reg) { 198 case SDHCI_TRANSFER_MODE: 199 /* 200 * Postpone this write, we must do it together with a 201 * command write that is down below. 202 */ 203 pltfm_host->xfer_mode_shadow = val; 204 return; 205 case SDHCI_COMMAND: 206 writel((val << 16) | pltfm_host->xfer_mode_shadow, 207 host->ioaddr + SDHCI_TRANSFER_MODE); 208 return; 209 } 210 211 writew(val, host->ioaddr + reg); 212 } 213 214 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) 215 { 216 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 217 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 218 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 219 220 /* Seems like we're getting spurious timeout and crc errors, so 221 * disable signalling of them. In case of real errors software 222 * timers should take care of eventually detecting them. 223 */ 224 if (unlikely(reg == SDHCI_SIGNAL_ENABLE)) 225 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC); 226 227 writel(val, host->ioaddr + reg); 228 229 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) && 230 (reg == SDHCI_INT_ENABLE))) { 231 /* Erratum: Must enable block gap interrupt detection */ 232 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); 233 if (val & SDHCI_INT_CARD_INT) 234 gap_ctrl |= 0x8; 235 else 236 gap_ctrl &= ~0x8; 237 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); 238 } 239 } 240 241 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable) 242 { 243 bool status; 244 u32 reg; 245 246 reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 247 status = !!(reg & SDHCI_CLOCK_CARD_EN); 248 249 if (status == enable) 250 return status; 251 252 if (enable) 253 reg |= SDHCI_CLOCK_CARD_EN; 254 else 255 reg &= ~SDHCI_CLOCK_CARD_EN; 256 257 sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL); 258 259 return status; 260 } 261 262 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) 263 { 264 bool is_tuning_cmd = 0; 265 bool clk_enabled; 266 u8 cmd; 267 268 if (reg == SDHCI_COMMAND) { 269 cmd = SDHCI_GET_CMD(val); 270 is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK || 271 cmd == MMC_SEND_TUNING_BLOCK_HS200; 272 } 273 274 if (is_tuning_cmd) 275 clk_enabled = tegra_sdhci_configure_card_clk(host, 0); 276 277 writew(val, host->ioaddr + reg); 278 279 if (is_tuning_cmd) { 280 udelay(1); 281 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 282 tegra_sdhci_configure_card_clk(host, clk_enabled); 283 } 284 } 285 286 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) 287 { 288 /* 289 * Write-enable shall be assumed if GPIO is missing in a board's 290 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on 291 * Tegra. 292 */ 293 return mmc_gpio_get_ro(host->mmc); 294 } 295 296 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) 297 { 298 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 299 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 300 int has_1v8, has_3v3; 301 302 /* 303 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad 304 * voltage configuration in order to perform voltage switching. This 305 * means that valid pinctrl info is required on SDHCI instances capable 306 * of performing voltage switching. Whether or not an SDHCI instance is 307 * capable of voltage switching is determined based on the regulator. 308 */ 309 310 if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL)) 311 return true; 312 313 if (IS_ERR(host->mmc->supply.vqmmc)) 314 return false; 315 316 has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc, 317 1700000, 1950000); 318 319 has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc, 320 2700000, 3600000); 321 322 if (has_1v8 == 1 && has_3v3 == 1) 323 return tegra_host->pad_control_available; 324 325 /* Fixed voltage, no pad control required. */ 326 return true; 327 } 328 329 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap) 330 { 331 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 332 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 333 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 334 bool card_clk_enabled = false; 335 u32 reg; 336 337 /* 338 * Touching the tap values is a bit tricky on some SoC generations. 339 * The quirk enables a workaround for a glitch that sometimes occurs if 340 * the tap values are changed. 341 */ 342 343 if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP) 344 card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); 345 346 reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); 347 reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK; 348 reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT; 349 sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); 350 351 if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP && 352 card_clk_enabled) { 353 udelay(1); 354 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 355 tegra_sdhci_configure_card_clk(host, card_clk_enabled); 356 } 357 } 358 359 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, 360 struct mmc_ios *ios) 361 { 362 struct sdhci_host *host = mmc_priv(mmc); 363 u32 val; 364 365 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); 366 367 if (ios->enhanced_strobe) 368 val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; 369 else 370 val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; 371 372 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); 373 374 } 375 376 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) 377 { 378 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 379 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 380 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 381 u32 misc_ctrl, clk_ctrl, pad_ctrl; 382 383 sdhci_reset(host, mask); 384 385 if (!(mask & SDHCI_RESET_ALL)) 386 return; 387 388 tegra_sdhci_set_tap(host, tegra_host->default_tap); 389 390 misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); 391 clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); 392 393 misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 | 394 SDHCI_MISC_CTRL_ENABLE_SDR50 | 395 SDHCI_MISC_CTRL_ENABLE_DDR50 | 396 SDHCI_MISC_CTRL_ENABLE_SDR104); 397 398 clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK | 399 SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE); 400 401 if (tegra_sdhci_is_pad_and_regulator_valid(host)) { 402 /* Erratum: Enable SDHCI spec v3.00 support */ 403 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) 404 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; 405 /* Advertise UHS modes as supported by host */ 406 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) 407 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50; 408 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) 409 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; 410 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) 411 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; 412 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) 413 clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; 414 } 415 416 clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT; 417 418 sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); 419 sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); 420 421 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) { 422 pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); 423 pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK; 424 pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL; 425 sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); 426 427 tegra_host->pad_calib_required = true; 428 } 429 430 tegra_host->ddr_signaling = false; 431 } 432 433 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable) 434 { 435 u32 val; 436 437 /* 438 * Enable or disable the additional I/O pad used by the drive strength 439 * calibration process. 440 */ 441 val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); 442 443 if (enable) 444 val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD; 445 else 446 val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD; 447 448 sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); 449 450 if (enable) 451 usleep_range(1, 2); 452 } 453 454 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host, 455 u16 pdpu) 456 { 457 u32 reg; 458 459 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); 460 reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK; 461 reg |= pdpu; 462 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); 463 } 464 465 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage, 466 bool state_drvupdn) 467 { 468 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 469 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 470 struct sdhci_tegra_autocal_offsets *offsets = 471 &tegra_host->autocal_offsets; 472 struct pinctrl_state *pinctrl_drvupdn = NULL; 473 int ret = 0; 474 u8 drvup = 0, drvdn = 0; 475 u32 reg; 476 477 if (!state_drvupdn) { 478 /* PADS Drive Strength */ 479 if (voltage == MMC_SIGNAL_VOLTAGE_180) { 480 if (tegra_host->pinctrl_state_1v8_drv) { 481 pinctrl_drvupdn = 482 tegra_host->pinctrl_state_1v8_drv; 483 } else { 484 drvup = offsets->pull_up_1v8_timeout; 485 drvdn = offsets->pull_down_1v8_timeout; 486 } 487 } else { 488 if (tegra_host->pinctrl_state_3v3_drv) { 489 pinctrl_drvupdn = 490 tegra_host->pinctrl_state_3v3_drv; 491 } else { 492 drvup = offsets->pull_up_3v3_timeout; 493 drvdn = offsets->pull_down_3v3_timeout; 494 } 495 } 496 497 if (pinctrl_drvupdn != NULL) { 498 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, 499 pinctrl_drvupdn); 500 if (ret < 0) 501 dev_err(mmc_dev(host->mmc), 502 "failed pads drvupdn, ret: %d\n", ret); 503 } else if ((drvup) || (drvdn)) { 504 reg = sdhci_readl(host, 505 SDHCI_TEGRA_SDMEM_COMP_PADCTRL); 506 reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK; 507 reg |= (drvup << 20) | (drvdn << 12); 508 sdhci_writel(host, reg, 509 SDHCI_TEGRA_SDMEM_COMP_PADCTRL); 510 } 511 512 } else { 513 /* Dual Voltage PADS Voltage selection */ 514 if (!tegra_host->pad_control_available) 515 return 0; 516 517 if (voltage == MMC_SIGNAL_VOLTAGE_180) { 518 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, 519 tegra_host->pinctrl_state_1v8); 520 if (ret < 0) 521 dev_err(mmc_dev(host->mmc), 522 "setting 1.8V failed, ret: %d\n", ret); 523 } else { 524 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, 525 tegra_host->pinctrl_state_3v3); 526 if (ret < 0) 527 dev_err(mmc_dev(host->mmc), 528 "setting 3.3V failed, ret: %d\n", ret); 529 } 530 } 531 532 return ret; 533 } 534 535 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) 536 { 537 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 538 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 539 struct sdhci_tegra_autocal_offsets offsets = 540 tegra_host->autocal_offsets; 541 struct mmc_ios *ios = &host->mmc->ios; 542 bool card_clk_enabled; 543 u16 pdpu; 544 u32 reg; 545 int ret; 546 547 switch (ios->timing) { 548 case MMC_TIMING_UHS_SDR104: 549 pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104; 550 break; 551 case MMC_TIMING_MMC_HS400: 552 pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400; 553 break; 554 default: 555 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) 556 pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8; 557 else 558 pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3; 559 } 560 561 /* Set initial offset before auto-calibration */ 562 tegra_sdhci_set_pad_autocal_offset(host, pdpu); 563 564 card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); 565 566 tegra_sdhci_configure_cal_pad(host, true); 567 568 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); 569 reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START; 570 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); 571 572 usleep_range(1, 2); 573 /* 10 ms timeout */ 574 ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS, 575 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE), 576 1000, 10000); 577 578 tegra_sdhci_configure_cal_pad(host, false); 579 580 tegra_sdhci_configure_card_clk(host, card_clk_enabled); 581 582 if (ret) { 583 dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n"); 584 585 /* Disable automatic cal and use fixed Drive Strengths */ 586 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); 587 reg &= ~SDHCI_AUTO_CAL_ENABLE; 588 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); 589 590 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false); 591 if (ret < 0) 592 dev_err(mmc_dev(host->mmc), 593 "Setting drive strengths failed: %d\n", ret); 594 } 595 } 596 597 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host) 598 { 599 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 600 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 601 struct sdhci_tegra_autocal_offsets *autocal = 602 &tegra_host->autocal_offsets; 603 int err; 604 605 err = device_property_read_u32(mmc_dev(host->mmc), 606 "nvidia,pad-autocal-pull-up-offset-3v3", 607 &autocal->pull_up_3v3); 608 if (err) 609 autocal->pull_up_3v3 = 0; 610 611 err = device_property_read_u32(mmc_dev(host->mmc), 612 "nvidia,pad-autocal-pull-down-offset-3v3", 613 &autocal->pull_down_3v3); 614 if (err) 615 autocal->pull_down_3v3 = 0; 616 617 err = device_property_read_u32(mmc_dev(host->mmc), 618 "nvidia,pad-autocal-pull-up-offset-1v8", 619 &autocal->pull_up_1v8); 620 if (err) 621 autocal->pull_up_1v8 = 0; 622 623 err = device_property_read_u32(mmc_dev(host->mmc), 624 "nvidia,pad-autocal-pull-down-offset-1v8", 625 &autocal->pull_down_1v8); 626 if (err) 627 autocal->pull_down_1v8 = 0; 628 629 err = device_property_read_u32(mmc_dev(host->mmc), 630 "nvidia,pad-autocal-pull-up-offset-sdr104", 631 &autocal->pull_up_sdr104); 632 if (err) 633 autocal->pull_up_sdr104 = autocal->pull_up_1v8; 634 635 err = device_property_read_u32(mmc_dev(host->mmc), 636 "nvidia,pad-autocal-pull-down-offset-sdr104", 637 &autocal->pull_down_sdr104); 638 if (err) 639 autocal->pull_down_sdr104 = autocal->pull_down_1v8; 640 641 err = device_property_read_u32(mmc_dev(host->mmc), 642 "nvidia,pad-autocal-pull-up-offset-hs400", 643 &autocal->pull_up_hs400); 644 if (err) 645 autocal->pull_up_hs400 = autocal->pull_up_1v8; 646 647 err = device_property_read_u32(mmc_dev(host->mmc), 648 "nvidia,pad-autocal-pull-down-offset-hs400", 649 &autocal->pull_down_hs400); 650 if (err) 651 autocal->pull_down_hs400 = autocal->pull_down_1v8; 652 653 /* 654 * Different fail-safe drive strength values based on the signaling 655 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls. 656 * So, avoid reading below device tree properties for SoCs that don't 657 * have NVQUIRK_NEEDS_PAD_CONTROL. 658 */ 659 if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL)) 660 return; 661 662 err = device_property_read_u32(mmc_dev(host->mmc), 663 "nvidia,pad-autocal-pull-up-offset-3v3-timeout", 664 &autocal->pull_up_3v3_timeout); 665 if (err) { 666 if (!IS_ERR(tegra_host->pinctrl_state_3v3) && 667 (tegra_host->pinctrl_state_3v3_drv == NULL)) 668 pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", 669 mmc_hostname(host->mmc)); 670 autocal->pull_up_3v3_timeout = 0; 671 } 672 673 err = device_property_read_u32(mmc_dev(host->mmc), 674 "nvidia,pad-autocal-pull-down-offset-3v3-timeout", 675 &autocal->pull_down_3v3_timeout); 676 if (err) { 677 if (!IS_ERR(tegra_host->pinctrl_state_3v3) && 678 (tegra_host->pinctrl_state_3v3_drv == NULL)) 679 pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", 680 mmc_hostname(host->mmc)); 681 autocal->pull_down_3v3_timeout = 0; 682 } 683 684 err = device_property_read_u32(mmc_dev(host->mmc), 685 "nvidia,pad-autocal-pull-up-offset-1v8-timeout", 686 &autocal->pull_up_1v8_timeout); 687 if (err) { 688 if (!IS_ERR(tegra_host->pinctrl_state_1v8) && 689 (tegra_host->pinctrl_state_1v8_drv == NULL)) 690 pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", 691 mmc_hostname(host->mmc)); 692 autocal->pull_up_1v8_timeout = 0; 693 } 694 695 err = device_property_read_u32(mmc_dev(host->mmc), 696 "nvidia,pad-autocal-pull-down-offset-1v8-timeout", 697 &autocal->pull_down_1v8_timeout); 698 if (err) { 699 if (!IS_ERR(tegra_host->pinctrl_state_1v8) && 700 (tegra_host->pinctrl_state_1v8_drv == NULL)) 701 pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", 702 mmc_hostname(host->mmc)); 703 autocal->pull_down_1v8_timeout = 0; 704 } 705 } 706 707 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 708 { 709 struct sdhci_host *host = mmc_priv(mmc); 710 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 711 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 712 ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib); 713 714 /* 100 ms calibration interval is specified in the TRM */ 715 if (ktime_to_ms(since_calib) > 100) { 716 tegra_sdhci_pad_autocalib(host); 717 tegra_host->last_calib = ktime_get(); 718 } 719 720 sdhci_request(mmc, mrq); 721 } 722 723 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host) 724 { 725 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 726 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 727 int err; 728 729 err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap", 730 &tegra_host->default_tap); 731 if (err) 732 tegra_host->default_tap = 0; 733 734 err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim", 735 &tegra_host->default_trim); 736 if (err) 737 tegra_host->default_trim = 0; 738 739 err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim", 740 &tegra_host->dqs_trim); 741 if (err) 742 tegra_host->dqs_trim = 0x11; 743 } 744 745 static void tegra_sdhci_parse_dt(struct sdhci_host *host) 746 { 747 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 748 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 749 750 if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe")) 751 tegra_host->enable_hwcq = true; 752 else 753 tegra_host->enable_hwcq = false; 754 755 tegra_sdhci_parse_pad_autocal_dt(host); 756 tegra_sdhci_parse_tap_and_trim(host); 757 } 758 759 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 760 { 761 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 762 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 763 unsigned long host_clk; 764 765 if (!clock) 766 return sdhci_set_clock(host, clock); 767 768 /* 769 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI 770 * divider to be configured to divided the host clock by two. The SDHCI 771 * clock divider is calculated as part of sdhci_set_clock() by 772 * sdhci_calc_clk(). The divider is calculated from host->max_clk and 773 * the requested clock rate. 774 * 775 * By setting the host->max_clk to clock * 2 the divider calculation 776 * will always result in the correct value for DDR50/52 modes, 777 * regardless of clock rate rounding, which may happen if the value 778 * from clk_get_rate() is used. 779 */ 780 host_clk = tegra_host->ddr_signaling ? clock * 2 : clock; 781 clk_set_rate(pltfm_host->clk, host_clk); 782 tegra_host->curr_clk_rate = host_clk; 783 if (tegra_host->ddr_signaling) 784 host->max_clk = host_clk; 785 else 786 host->max_clk = clk_get_rate(pltfm_host->clk); 787 788 sdhci_set_clock(host, clock); 789 790 if (tegra_host->pad_calib_required) { 791 tegra_sdhci_pad_autocalib(host); 792 tegra_host->pad_calib_required = false; 793 } 794 } 795 796 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host) 797 { 798 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 799 800 return clk_round_rate(pltfm_host->clk, UINT_MAX); 801 } 802 803 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim) 804 { 805 u32 val; 806 807 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES); 808 val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK; 809 val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT; 810 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES); 811 } 812 813 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host) 814 { 815 u32 reg; 816 int err; 817 818 reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG); 819 reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE; 820 sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG); 821 822 /* 1 ms sleep, 5 ms timeout */ 823 err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA, 824 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE), 825 1000, 5000); 826 if (err) 827 dev_err(mmc_dev(host->mmc), 828 "HS400 delay line calibration timed out\n"); 829 } 830 831 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up, 832 u8 thd_low, u8 fixed_tap) 833 { 834 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 835 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 836 u32 val, tun_status; 837 u8 word, bit, edge1, tap, window; 838 bool tap_result; 839 bool start_fail = false; 840 bool start_pass = false; 841 bool end_pass = false; 842 bool first_fail = false; 843 bool first_pass = false; 844 u8 start_pass_tap = 0; 845 u8 end_pass_tap = 0; 846 u8 first_fail_tap = 0; 847 u8 first_pass_tap = 0; 848 u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE; 849 850 /* 851 * Read auto-tuned results and extract good valid passing window by 852 * filtering out un-wanted bubble/partial/merged windows. 853 */ 854 for (word = 0; word < total_tuning_words; word++) { 855 val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0); 856 val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK; 857 val |= word; 858 sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0); 859 tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0); 860 bit = 0; 861 while (bit < TUNING_WORD_BIT_SIZE) { 862 tap = word * TUNING_WORD_BIT_SIZE + bit; 863 tap_result = tun_status & (1 << bit); 864 if (!tap_result && !start_fail) { 865 start_fail = true; 866 if (!first_fail) { 867 first_fail_tap = tap; 868 first_fail = true; 869 } 870 871 } else if (tap_result && start_fail && !start_pass) { 872 start_pass_tap = tap; 873 start_pass = true; 874 if (!first_pass) { 875 first_pass_tap = tap; 876 first_pass = true; 877 } 878 879 } else if (!tap_result && start_fail && start_pass && 880 !end_pass) { 881 end_pass_tap = tap - 1; 882 end_pass = true; 883 } else if (tap_result && start_pass && start_fail && 884 end_pass) { 885 window = end_pass_tap - start_pass_tap; 886 /* discard merged window and bubble window */ 887 if (window >= thd_up || window < thd_low) { 888 start_pass_tap = tap; 889 end_pass = false; 890 } else { 891 /* set tap at middle of valid window */ 892 tap = start_pass_tap + window / 2; 893 tegra_host->tuned_tap_delay = tap; 894 return; 895 } 896 } 897 898 bit++; 899 } 900 } 901 902 if (!first_fail) { 903 WARN(1, "no edge detected, continue with hw tuned delay.\n"); 904 } else if (first_pass) { 905 /* set tap location at fixed tap relative to the first edge */ 906 edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2; 907 if (edge1 - 1 > fixed_tap) 908 tegra_host->tuned_tap_delay = edge1 - fixed_tap; 909 else 910 tegra_host->tuned_tap_delay = edge1 + fixed_tap; 911 } 912 } 913 914 static void tegra_sdhci_post_tuning(struct sdhci_host *host) 915 { 916 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 917 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 918 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 919 u32 avg_tap_dly, val, min_tap_dly, max_tap_dly; 920 u8 fixed_tap, start_tap, end_tap, window_width; 921 u8 thdupper, thdlower; 922 u8 num_iter; 923 u32 clk_rate_mhz, period_ps, bestcase, worstcase; 924 925 /* retain HW tuned tap to use incase if no correction is needed */ 926 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); 927 tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >> 928 SDHCI_CLOCK_CTRL_TAP_SHIFT; 929 if (soc_data->min_tap_delay && soc_data->max_tap_delay) { 930 min_tap_dly = soc_data->min_tap_delay; 931 max_tap_dly = soc_data->max_tap_delay; 932 clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC; 933 period_ps = USEC_PER_SEC / clk_rate_mhz; 934 bestcase = period_ps / min_tap_dly; 935 worstcase = period_ps / max_tap_dly; 936 /* 937 * Upper and Lower bound thresholds used to detect merged and 938 * bubble windows 939 */ 940 thdupper = (2 * worstcase + bestcase) / 2; 941 thdlower = worstcase / 4; 942 /* 943 * fixed tap is used when HW tuning result contains single edge 944 * and tap is set at fixed tap delay relative to the first edge 945 */ 946 avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly); 947 fixed_tap = avg_tap_dly / 2; 948 949 val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1); 950 start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK; 951 end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) & 952 SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK; 953 window_width = end_tap - start_tap; 954 num_iter = host->tuning_loop_count; 955 /* 956 * partial window includes edges of the tuning range. 957 * merged window includes more taps so window width is higher 958 * than upper threshold. 959 */ 960 if (start_tap == 0 || (end_tap == (num_iter - 1)) || 961 (end_tap == num_iter - 2) || window_width >= thdupper) { 962 pr_debug("%s: Apply tuning correction\n", 963 mmc_hostname(host->mmc)); 964 tegra_sdhci_tap_correction(host, thdupper, thdlower, 965 fixed_tap); 966 } 967 } 968 969 tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay); 970 } 971 972 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode) 973 { 974 struct sdhci_host *host = mmc_priv(mmc); 975 int err; 976 977 err = sdhci_execute_tuning(mmc, opcode); 978 if (!err && !host->tuning_err) 979 tegra_sdhci_post_tuning(host); 980 981 return err; 982 } 983 984 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host, 985 unsigned timing) 986 { 987 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 988 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 989 bool set_default_tap = false; 990 bool set_dqs_trim = false; 991 bool do_hs400_dll_cal = false; 992 u8 iter = TRIES_256; 993 u32 val; 994 995 tegra_host->ddr_signaling = false; 996 switch (timing) { 997 case MMC_TIMING_UHS_SDR50: 998 break; 999 case MMC_TIMING_UHS_SDR104: 1000 case MMC_TIMING_MMC_HS200: 1001 /* Don't set default tap on tunable modes. */ 1002 iter = TRIES_128; 1003 break; 1004 case MMC_TIMING_MMC_HS400: 1005 set_dqs_trim = true; 1006 do_hs400_dll_cal = true; 1007 iter = TRIES_128; 1008 break; 1009 case MMC_TIMING_MMC_DDR52: 1010 case MMC_TIMING_UHS_DDR50: 1011 tegra_host->ddr_signaling = true; 1012 set_default_tap = true; 1013 break; 1014 default: 1015 set_default_tap = true; 1016 break; 1017 } 1018 1019 val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0); 1020 val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK | 1021 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK | 1022 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK); 1023 val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT | 1024 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT | 1025 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT); 1026 sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0); 1027 sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0); 1028 1029 host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256; 1030 1031 sdhci_set_uhs_signaling(host, timing); 1032 1033 tegra_sdhci_pad_autocalib(host); 1034 1035 if (tegra_host->tuned_tap_delay && !set_default_tap) 1036 tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay); 1037 else 1038 tegra_sdhci_set_tap(host, tegra_host->default_tap); 1039 1040 if (set_dqs_trim) 1041 tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim); 1042 1043 if (do_hs400_dll_cal) 1044 tegra_sdhci_hs400_dll_cal(host); 1045 } 1046 1047 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 1048 { 1049 unsigned int min, max; 1050 1051 /* 1052 * Start search for minimum tap value at 10, as smaller values are 1053 * may wrongly be reported as working but fail at higher speeds, 1054 * according to the TRM. 1055 */ 1056 min = 10; 1057 while (min < 255) { 1058 tegra_sdhci_set_tap(host, min); 1059 if (!mmc_send_tuning(host->mmc, opcode, NULL)) 1060 break; 1061 min++; 1062 } 1063 1064 /* Find the maximum tap value that still passes. */ 1065 max = min + 1; 1066 while (max < 255) { 1067 tegra_sdhci_set_tap(host, max); 1068 if (mmc_send_tuning(host->mmc, opcode, NULL)) { 1069 max--; 1070 break; 1071 } 1072 max++; 1073 } 1074 1075 /* The TRM states the ideal tap value is at 75% in the passing range. */ 1076 tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4)); 1077 1078 return mmc_send_tuning(host->mmc, opcode, NULL); 1079 } 1080 1081 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, 1082 struct mmc_ios *ios) 1083 { 1084 struct sdhci_host *host = mmc_priv(mmc); 1085 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1086 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 1087 int ret = 0; 1088 1089 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { 1090 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); 1091 if (ret < 0) 1092 return ret; 1093 ret = sdhci_start_signal_voltage_switch(mmc, ios); 1094 } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { 1095 ret = sdhci_start_signal_voltage_switch(mmc, ios); 1096 if (ret < 0) 1097 return ret; 1098 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); 1099 } 1100 1101 if (tegra_host->pad_calib_required) 1102 tegra_sdhci_pad_autocalib(host); 1103 1104 return ret; 1105 } 1106 1107 static int tegra_sdhci_init_pinctrl_info(struct device *dev, 1108 struct sdhci_tegra *tegra_host) 1109 { 1110 tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev); 1111 if (IS_ERR(tegra_host->pinctrl_sdmmc)) { 1112 dev_dbg(dev, "No pinctrl info, err: %ld\n", 1113 PTR_ERR(tegra_host->pinctrl_sdmmc)); 1114 return -1; 1115 } 1116 1117 tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state( 1118 tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv"); 1119 if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) { 1120 if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV) 1121 tegra_host->pinctrl_state_1v8_drv = NULL; 1122 } 1123 1124 tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state( 1125 tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv"); 1126 if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) { 1127 if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV) 1128 tegra_host->pinctrl_state_3v3_drv = NULL; 1129 } 1130 1131 tegra_host->pinctrl_state_3v3 = 1132 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3"); 1133 if (IS_ERR(tegra_host->pinctrl_state_3v3)) { 1134 dev_warn(dev, "Missing 3.3V pad state, err: %ld\n", 1135 PTR_ERR(tegra_host->pinctrl_state_3v3)); 1136 return -1; 1137 } 1138 1139 tegra_host->pinctrl_state_1v8 = 1140 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8"); 1141 if (IS_ERR(tegra_host->pinctrl_state_1v8)) { 1142 dev_warn(dev, "Missing 1.8V pad state, err: %ld\n", 1143 PTR_ERR(tegra_host->pinctrl_state_1v8)); 1144 return -1; 1145 } 1146 1147 tegra_host->pad_control_available = true; 1148 1149 return 0; 1150 } 1151 1152 static void tegra_sdhci_voltage_switch(struct sdhci_host *host) 1153 { 1154 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1155 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 1156 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 1157 1158 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) 1159 tegra_host->pad_calib_required = true; 1160 } 1161 1162 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg) 1163 { 1164 struct mmc_host *mmc = cq_host->mmc; 1165 struct sdhci_host *host = mmc_priv(mmc); 1166 u8 ctrl; 1167 ktime_t timeout; 1168 bool timed_out; 1169 1170 /* 1171 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to 1172 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need 1173 * to be re-configured. 1174 * Tegra CQHCI/SDHCI prevents write access to block size register when 1175 * CQE is unhalted. So handling CQE resume sequence here to configure 1176 * SDHCI block registers prior to exiting CQE halt state. 1177 */ 1178 if (reg == CQHCI_CTL && !(val & CQHCI_HALT) && 1179 cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) { 1180 sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE); 1181 sdhci_cqe_enable(mmc); 1182 writel(val, cq_host->mmio + reg); 1183 timeout = ktime_add_us(ktime_get(), 50); 1184 while (1) { 1185 timed_out = ktime_compare(ktime_get(), timeout) > 0; 1186 ctrl = cqhci_readl(cq_host, CQHCI_CTL); 1187 if (!(ctrl & CQHCI_HALT) || timed_out) 1188 break; 1189 } 1190 /* 1191 * CQE usually resumes very quick, but incase if Tegra CQE 1192 * doesn't resume retry unhalt. 1193 */ 1194 if (timed_out) 1195 writel(val, cq_host->mmio + reg); 1196 } else { 1197 writel(val, cq_host->mmio + reg); 1198 } 1199 } 1200 1201 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc, 1202 struct mmc_request *mrq, u64 *data) 1203 { 1204 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc)); 1205 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 1206 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; 1207 1208 if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING && 1209 mrq->cmd->flags & MMC_RSP_R1B) 1210 *data |= CQHCI_CMD_TIMING(1); 1211 } 1212 1213 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc) 1214 { 1215 struct cqhci_host *cq_host = mmc->cqe_private; 1216 struct sdhci_host *host = mmc_priv(mmc); 1217 u32 val; 1218 1219 /* 1220 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size 1221 * register when CQE is enabled and unhalted. 1222 * CQHCI driver enables CQE prior to activation, so disable CQE before 1223 * programming block size in sdhci controller and enable it back. 1224 */ 1225 if (!cq_host->activated) { 1226 val = cqhci_readl(cq_host, CQHCI_CFG); 1227 if (val & CQHCI_ENABLE) 1228 cqhci_writel(cq_host, (val & ~CQHCI_ENABLE), 1229 CQHCI_CFG); 1230 sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE); 1231 sdhci_cqe_enable(mmc); 1232 if (val & CQHCI_ENABLE) 1233 cqhci_writel(cq_host, val, CQHCI_CFG); 1234 } 1235 1236 /* 1237 * CMD CRC errors are seen sometimes with some eMMC devices when status 1238 * command is sent during transfer of last data block which is the 1239 * default case as send status command block counter (CBC) is 1. 1240 * Recommended fix to set CBC to 0 allowing send status command only 1241 * when data lines are idle. 1242 */ 1243 val = cqhci_readl(cq_host, CQHCI_SSC1); 1244 val &= ~CQHCI_SSC1_CBC_MASK; 1245 cqhci_writel(cq_host, val, CQHCI_SSC1); 1246 } 1247 1248 static void sdhci_tegra_dumpregs(struct mmc_host *mmc) 1249 { 1250 sdhci_dumpregs(mmc_priv(mmc)); 1251 } 1252 1253 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask) 1254 { 1255 int cmd_error = 0; 1256 int data_error = 0; 1257 1258 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 1259 return intmask; 1260 1261 cqhci_irq(host->mmc, intmask, cmd_error, data_error); 1262 1263 return 0; 1264 } 1265 1266 static void tegra_sdhci_set_timeout(struct sdhci_host *host, 1267 struct mmc_command *cmd) 1268 { 1269 u32 val; 1270 1271 /* 1272 * HW busy detection timeout is based on programmed data timeout 1273 * counter and maximum supported timeout is 11s which may not be 1274 * enough for long operations like cache flush, sleep awake, erase. 1275 * 1276 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows 1277 * host controller to wait for busy state until the card is busy 1278 * without HW timeout. 1279 * 1280 * So, use infinite busy wait mode for operations that may take 1281 * more than maximum HW busy timeout of 11s otherwise use finite 1282 * busy wait mode. 1283 */ 1284 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); 1285 if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC) 1286 val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; 1287 else 1288 val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; 1289 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL); 1290 1291 __sdhci_set_timeout(host, cmd); 1292 } 1293 1294 static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc) 1295 { 1296 struct cqhci_host *cq_host = mmc->cqe_private; 1297 u32 reg; 1298 1299 reg = cqhci_readl(cq_host, CQHCI_CFG); 1300 reg |= CQHCI_ENABLE; 1301 cqhci_writel(cq_host, reg, CQHCI_CFG); 1302 } 1303 1304 static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc) 1305 { 1306 struct cqhci_host *cq_host = mmc->cqe_private; 1307 struct sdhci_host *host = mmc_priv(mmc); 1308 u32 reg; 1309 1310 reg = cqhci_readl(cq_host, CQHCI_CFG); 1311 reg &= ~CQHCI_ENABLE; 1312 cqhci_writel(cq_host, reg, CQHCI_CFG); 1313 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1314 } 1315 1316 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { 1317 .write_l = tegra_cqhci_writel, 1318 .enable = sdhci_tegra_cqe_enable, 1319 .disable = sdhci_cqe_disable, 1320 .dumpregs = sdhci_tegra_dumpregs, 1321 .update_dcmd_desc = sdhci_tegra_update_dcmd_desc, 1322 .pre_enable = sdhci_tegra_cqe_pre_enable, 1323 .post_disable = sdhci_tegra_cqe_post_disable, 1324 }; 1325 1326 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host) 1327 { 1328 struct sdhci_pltfm_host *platform = sdhci_priv(host); 1329 struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform); 1330 const struct sdhci_tegra_soc_data *soc = tegra->soc_data; 1331 struct device *dev = mmc_dev(host->mmc); 1332 1333 if (soc->dma_mask) 1334 return dma_set_mask_and_coherent(dev, soc->dma_mask); 1335 1336 return 0; 1337 } 1338 1339 static const struct sdhci_ops tegra_sdhci_ops = { 1340 .get_ro = tegra_sdhci_get_ro, 1341 .read_w = tegra_sdhci_readw, 1342 .write_l = tegra_sdhci_writel, 1343 .set_clock = tegra_sdhci_set_clock, 1344 .set_dma_mask = tegra_sdhci_set_dma_mask, 1345 .set_bus_width = sdhci_set_bus_width, 1346 .reset = tegra_sdhci_reset, 1347 .platform_execute_tuning = tegra_sdhci_execute_tuning, 1348 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, 1349 .voltage_switch = tegra_sdhci_voltage_switch, 1350 .get_max_clock = tegra_sdhci_get_max_clock, 1351 }; 1352 1353 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { 1354 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1355 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1356 SDHCI_QUIRK_NO_HISPD_BIT | 1357 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 1358 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1359 .ops = &tegra_sdhci_ops, 1360 }; 1361 1362 static const struct sdhci_tegra_soc_data soc_data_tegra20 = { 1363 .pdata = &sdhci_tegra20_pdata, 1364 .dma_mask = DMA_BIT_MASK(32), 1365 .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | 1366 NVQUIRK_HAS_ANDROID_GPT_SECTOR | 1367 NVQUIRK_ENABLE_BLOCK_GAP_DET, 1368 }; 1369 1370 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = { 1371 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1372 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 1373 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1374 SDHCI_QUIRK_NO_HISPD_BIT | 1375 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 1376 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1377 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1378 SDHCI_QUIRK2_BROKEN_HS200 | 1379 /* 1380 * Auto-CMD23 leads to "Got command interrupt 0x00010000 even 1381 * though no command operation was in progress." 1382 * 1383 * The exact reason is unknown, as the same hardware seems 1384 * to support Auto CMD23 on a downstream 3.1 kernel. 1385 */ 1386 SDHCI_QUIRK2_ACMD23_BROKEN, 1387 .ops = &tegra_sdhci_ops, 1388 }; 1389 1390 static const struct sdhci_tegra_soc_data soc_data_tegra30 = { 1391 .pdata = &sdhci_tegra30_pdata, 1392 .dma_mask = DMA_BIT_MASK(32), 1393 .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 | 1394 NVQUIRK_ENABLE_SDR50 | 1395 NVQUIRK_ENABLE_SDR104 | 1396 NVQUIRK_HAS_ANDROID_GPT_SECTOR | 1397 NVQUIRK_HAS_PADCALIB, 1398 }; 1399 1400 static const struct sdhci_ops tegra114_sdhci_ops = { 1401 .get_ro = tegra_sdhci_get_ro, 1402 .read_w = tegra_sdhci_readw, 1403 .write_w = tegra_sdhci_writew, 1404 .write_l = tegra_sdhci_writel, 1405 .set_clock = tegra_sdhci_set_clock, 1406 .set_dma_mask = tegra_sdhci_set_dma_mask, 1407 .set_bus_width = sdhci_set_bus_width, 1408 .reset = tegra_sdhci_reset, 1409 .platform_execute_tuning = tegra_sdhci_execute_tuning, 1410 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, 1411 .voltage_switch = tegra_sdhci_voltage_switch, 1412 .get_max_clock = tegra_sdhci_get_max_clock, 1413 }; 1414 1415 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { 1416 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1417 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 1418 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1419 SDHCI_QUIRK_NO_HISPD_BIT | 1420 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 1421 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1422 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1423 .ops = &tegra114_sdhci_ops, 1424 }; 1425 1426 static const struct sdhci_tegra_soc_data soc_data_tegra114 = { 1427 .pdata = &sdhci_tegra114_pdata, 1428 .dma_mask = DMA_BIT_MASK(32), 1429 .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR, 1430 }; 1431 1432 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = { 1433 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1434 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 1435 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1436 SDHCI_QUIRK_NO_HISPD_BIT | 1437 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 1438 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1439 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1440 .ops = &tegra114_sdhci_ops, 1441 }; 1442 1443 static const struct sdhci_tegra_soc_data soc_data_tegra124 = { 1444 .pdata = &sdhci_tegra124_pdata, 1445 .dma_mask = DMA_BIT_MASK(34), 1446 .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR, 1447 }; 1448 1449 static const struct sdhci_ops tegra210_sdhci_ops = { 1450 .get_ro = tegra_sdhci_get_ro, 1451 .read_w = tegra_sdhci_readw, 1452 .write_w = tegra210_sdhci_writew, 1453 .write_l = tegra_sdhci_writel, 1454 .set_clock = tegra_sdhci_set_clock, 1455 .set_dma_mask = tegra_sdhci_set_dma_mask, 1456 .set_bus_width = sdhci_set_bus_width, 1457 .reset = tegra_sdhci_reset, 1458 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, 1459 .voltage_switch = tegra_sdhci_voltage_switch, 1460 .get_max_clock = tegra_sdhci_get_max_clock, 1461 .set_timeout = tegra_sdhci_set_timeout, 1462 }; 1463 1464 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { 1465 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1466 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1467 SDHCI_QUIRK_NO_HISPD_BIT | 1468 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 1469 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1470 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1471 .ops = &tegra210_sdhci_ops, 1472 }; 1473 1474 static const struct sdhci_tegra_soc_data soc_data_tegra210 = { 1475 .pdata = &sdhci_tegra210_pdata, 1476 .dma_mask = DMA_BIT_MASK(34), 1477 .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | 1478 NVQUIRK_HAS_PADCALIB | 1479 NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | 1480 NVQUIRK_ENABLE_SDR50 | 1481 NVQUIRK_ENABLE_SDR104 | 1482 NVQUIRK_HAS_TMCLK, 1483 .min_tap_delay = 106, 1484 .max_tap_delay = 185, 1485 }; 1486 1487 static const struct sdhci_ops tegra186_sdhci_ops = { 1488 .get_ro = tegra_sdhci_get_ro, 1489 .read_w = tegra_sdhci_readw, 1490 .write_l = tegra_sdhci_writel, 1491 .set_clock = tegra_sdhci_set_clock, 1492 .set_dma_mask = tegra_sdhci_set_dma_mask, 1493 .set_bus_width = sdhci_set_bus_width, 1494 .reset = tegra_sdhci_reset, 1495 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, 1496 .voltage_switch = tegra_sdhci_voltage_switch, 1497 .get_max_clock = tegra_sdhci_get_max_clock, 1498 .irq = sdhci_tegra_cqhci_irq, 1499 .set_timeout = tegra_sdhci_set_timeout, 1500 }; 1501 1502 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { 1503 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 1504 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1505 SDHCI_QUIRK_NO_HISPD_BIT | 1506 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | 1507 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1508 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1509 .ops = &tegra186_sdhci_ops, 1510 }; 1511 1512 static const struct sdhci_tegra_soc_data soc_data_tegra186 = { 1513 .pdata = &sdhci_tegra186_pdata, 1514 .dma_mask = DMA_BIT_MASK(40), 1515 .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | 1516 NVQUIRK_HAS_PADCALIB | 1517 NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | 1518 NVQUIRK_ENABLE_SDR50 | 1519 NVQUIRK_ENABLE_SDR104 | 1520 NVQUIRK_HAS_TMCLK | 1521 NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING, 1522 .min_tap_delay = 84, 1523 .max_tap_delay = 136, 1524 }; 1525 1526 static const struct sdhci_tegra_soc_data soc_data_tegra194 = { 1527 .pdata = &sdhci_tegra186_pdata, 1528 .dma_mask = DMA_BIT_MASK(39), 1529 .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | 1530 NVQUIRK_HAS_PADCALIB | 1531 NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | 1532 NVQUIRK_ENABLE_SDR50 | 1533 NVQUIRK_ENABLE_SDR104 | 1534 NVQUIRK_HAS_TMCLK, 1535 .min_tap_delay = 96, 1536 .max_tap_delay = 139, 1537 }; 1538 1539 static const struct of_device_id sdhci_tegra_dt_match[] = { 1540 { .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 }, 1541 { .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 }, 1542 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, 1543 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 }, 1544 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, 1545 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, 1546 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, 1547 {} 1548 }; 1549 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match); 1550 1551 static int sdhci_tegra_add_host(struct sdhci_host *host) 1552 { 1553 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1554 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 1555 struct cqhci_host *cq_host; 1556 bool dma64; 1557 int ret; 1558 1559 if (!tegra_host->enable_hwcq) 1560 return sdhci_add_host(host); 1561 1562 sdhci_enable_v4_mode(host); 1563 1564 ret = sdhci_setup_host(host); 1565 if (ret) 1566 return ret; 1567 1568 host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 1569 1570 cq_host = devm_kzalloc(mmc_dev(host->mmc), 1571 sizeof(*cq_host), GFP_KERNEL); 1572 if (!cq_host) { 1573 ret = -ENOMEM; 1574 goto cleanup; 1575 } 1576 1577 cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR; 1578 cq_host->ops = &sdhci_tegra_cqhci_ops; 1579 1580 dma64 = host->flags & SDHCI_USE_64_BIT_DMA; 1581 if (dma64) 1582 cq_host->caps |= CQHCI_TASK_DESC_SZ_128; 1583 1584 ret = cqhci_init(cq_host, host->mmc, dma64); 1585 if (ret) 1586 goto cleanup; 1587 1588 ret = __sdhci_add_host(host); 1589 if (ret) 1590 goto cleanup; 1591 1592 return 0; 1593 1594 cleanup: 1595 sdhci_cleanup_host(host); 1596 return ret; 1597 } 1598 1599 static int sdhci_tegra_probe(struct platform_device *pdev) 1600 { 1601 const struct of_device_id *match; 1602 const struct sdhci_tegra_soc_data *soc_data; 1603 struct sdhci_host *host; 1604 struct sdhci_pltfm_host *pltfm_host; 1605 struct sdhci_tegra *tegra_host; 1606 struct clk *clk; 1607 int rc; 1608 1609 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); 1610 if (!match) 1611 return -EINVAL; 1612 soc_data = match->data; 1613 1614 host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host)); 1615 if (IS_ERR(host)) 1616 return PTR_ERR(host); 1617 pltfm_host = sdhci_priv(host); 1618 1619 tegra_host = sdhci_pltfm_priv(pltfm_host); 1620 tegra_host->ddr_signaling = false; 1621 tegra_host->pad_calib_required = false; 1622 tegra_host->pad_control_available = false; 1623 tegra_host->soc_data = soc_data; 1624 1625 if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR) 1626 host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA; 1627 1628 if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) { 1629 rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host); 1630 if (rc == 0) 1631 host->mmc_host_ops.start_signal_voltage_switch = 1632 sdhci_tegra_start_signal_voltage_switch; 1633 } 1634 1635 /* Hook to periodically rerun pad calibration */ 1636 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) 1637 host->mmc_host_ops.request = tegra_sdhci_request; 1638 1639 host->mmc_host_ops.hs400_enhanced_strobe = 1640 tegra_sdhci_hs400_enhanced_strobe; 1641 1642 if (!host->ops->platform_execute_tuning) 1643 host->mmc_host_ops.execute_tuning = 1644 tegra_sdhci_execute_hw_tuning; 1645 1646 rc = mmc_of_parse(host->mmc); 1647 if (rc) 1648 goto err_parse_dt; 1649 1650 if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) 1651 host->mmc->caps |= MMC_CAP_1_8V_DDR; 1652 1653 /* HW busy detection is supported, but R1B responses are required. */ 1654 host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; 1655 1656 tegra_sdhci_parse_dt(host); 1657 1658 tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", 1659 GPIOD_OUT_HIGH); 1660 if (IS_ERR(tegra_host->power_gpio)) { 1661 rc = PTR_ERR(tegra_host->power_gpio); 1662 goto err_power_req; 1663 } 1664 1665 /* 1666 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host 1667 * timeout clock and SW can choose TMCLK or SDCLK for hardware 1668 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of 1669 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL. 1670 * 1671 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses 1672 * 12Mhz TMCLK which is advertised in host capability register. 1673 * With TMCLK of 12Mhz provides maximum data timeout period that can 1674 * be achieved is 11s better than using SDCLK for data timeout. 1675 * 1676 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's 1677 * supporting separate TMCLK. 1678 */ 1679 1680 if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) { 1681 clk = devm_clk_get(&pdev->dev, "tmclk"); 1682 if (IS_ERR(clk)) { 1683 rc = PTR_ERR(clk); 1684 if (rc == -EPROBE_DEFER) 1685 goto err_power_req; 1686 1687 dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc); 1688 clk = NULL; 1689 } 1690 1691 clk_set_rate(clk, 12000000); 1692 rc = clk_prepare_enable(clk); 1693 if (rc) { 1694 dev_err(&pdev->dev, 1695 "failed to enable tmclk: %d\n", rc); 1696 goto err_power_req; 1697 } 1698 1699 tegra_host->tmclk = clk; 1700 } 1701 1702 clk = devm_clk_get(mmc_dev(host->mmc), NULL); 1703 if (IS_ERR(clk)) { 1704 rc = dev_err_probe(&pdev->dev, PTR_ERR(clk), 1705 "failed to get clock\n"); 1706 goto err_clk_get; 1707 } 1708 clk_prepare_enable(clk); 1709 pltfm_host->clk = clk; 1710 1711 tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, 1712 "sdhci"); 1713 if (IS_ERR(tegra_host->rst)) { 1714 rc = PTR_ERR(tegra_host->rst); 1715 dev_err(&pdev->dev, "failed to get reset control: %d\n", rc); 1716 goto err_rst_get; 1717 } 1718 1719 rc = reset_control_assert(tegra_host->rst); 1720 if (rc) 1721 goto err_rst_get; 1722 1723 usleep_range(2000, 4000); 1724 1725 rc = reset_control_deassert(tegra_host->rst); 1726 if (rc) 1727 goto err_rst_get; 1728 1729 usleep_range(2000, 4000); 1730 1731 rc = sdhci_tegra_add_host(host); 1732 if (rc) 1733 goto err_add_host; 1734 1735 return 0; 1736 1737 err_add_host: 1738 reset_control_assert(tegra_host->rst); 1739 err_rst_get: 1740 clk_disable_unprepare(pltfm_host->clk); 1741 err_clk_get: 1742 clk_disable_unprepare(tegra_host->tmclk); 1743 err_power_req: 1744 err_parse_dt: 1745 sdhci_pltfm_free(pdev); 1746 return rc; 1747 } 1748 1749 static int sdhci_tegra_remove(struct platform_device *pdev) 1750 { 1751 struct sdhci_host *host = platform_get_drvdata(pdev); 1752 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1753 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 1754 1755 sdhci_remove_host(host, 0); 1756 1757 reset_control_assert(tegra_host->rst); 1758 usleep_range(2000, 4000); 1759 clk_disable_unprepare(pltfm_host->clk); 1760 clk_disable_unprepare(tegra_host->tmclk); 1761 1762 sdhci_pltfm_free(pdev); 1763 1764 return 0; 1765 } 1766 1767 #ifdef CONFIG_PM_SLEEP 1768 static int __maybe_unused sdhci_tegra_suspend(struct device *dev) 1769 { 1770 struct sdhci_host *host = dev_get_drvdata(dev); 1771 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1772 int ret; 1773 1774 if (host->mmc->caps2 & MMC_CAP2_CQE) { 1775 ret = cqhci_suspend(host->mmc); 1776 if (ret) 1777 return ret; 1778 } 1779 1780 ret = sdhci_suspend_host(host); 1781 if (ret) { 1782 cqhci_resume(host->mmc); 1783 return ret; 1784 } 1785 1786 clk_disable_unprepare(pltfm_host->clk); 1787 return 0; 1788 } 1789 1790 static int __maybe_unused sdhci_tegra_resume(struct device *dev) 1791 { 1792 struct sdhci_host *host = dev_get_drvdata(dev); 1793 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1794 int ret; 1795 1796 ret = clk_prepare_enable(pltfm_host->clk); 1797 if (ret) 1798 return ret; 1799 1800 ret = sdhci_resume_host(host); 1801 if (ret) 1802 goto disable_clk; 1803 1804 if (host->mmc->caps2 & MMC_CAP2_CQE) { 1805 ret = cqhci_resume(host->mmc); 1806 if (ret) 1807 goto suspend_host; 1808 } 1809 1810 return 0; 1811 1812 suspend_host: 1813 sdhci_suspend_host(host); 1814 disable_clk: 1815 clk_disable_unprepare(pltfm_host->clk); 1816 return ret; 1817 } 1818 #endif 1819 1820 static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend, 1821 sdhci_tegra_resume); 1822 1823 static struct platform_driver sdhci_tegra_driver = { 1824 .driver = { 1825 .name = "sdhci-tegra", 1826 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1827 .of_match_table = sdhci_tegra_dt_match, 1828 .pm = &sdhci_tegra_dev_pm_ops, 1829 }, 1830 .probe = sdhci_tegra_probe, 1831 .remove = sdhci_tegra_remove, 1832 }; 1833 1834 module_platform_driver(sdhci_tegra_driver); 1835 1836 MODULE_DESCRIPTION("SDHCI driver for Tegra"); 1837 MODULE_AUTHOR("Google, Inc."); 1838 MODULE_LICENSE("GPL v2"); 1839