// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)

#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)

#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8

#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200

#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)

#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)

#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
#define TRIES_128					2
#define TRIES_256					4
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7

#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
#define TUNING_WORD_BIT_SIZE				32

#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
#define SDHCI_AUTO_CAL_START				BIT(31)
#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff

#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000

#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)

#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
#define NVQUIRK_ENABLE_SDR50				BIT(3)
#define NVQUIRK_ENABLE_SDR104				BIT(4)
#define NVQUIRK_ENABLE_DDR50				BIT(5)
/*
 * HAS_PADCALIB NVQUIRK is for SoCs supporting auto calibration of pad
 * drive strength.
 */
#define NVQUIRK_HAS_PADCALIB				BIT(6)
/*
 * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 * on the signaling mode.
 */
#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)

/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000

struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;
	u64 dma_mask;
	u32 nvquirks;
	u8 min_tap_delay;
	u8 max_tap_delay;
};

/* Magic pull up and pull down pad calibration offsets */
struct sdhci_tegra_autocal_offsets {
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};

struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	bool ddr_signaling;
	bool pad_calib_required;
	bool pad_control_available;

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;

	u32 default_tap;
	u32 default_trim;
	u32 dqs_trim;
	bool enable_hwcq;
	unsigned long curr_clk_rate;
	u8 tuned_tap_delay;
};

static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
			(reg == SDHCI_HOST_VERSION))) {
		/* Erratum: Version register is invalid in HW. */
		return SDHCI_SPEC_200;
	}

	return readw(host->ioaddr + reg);
}

static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	switch (reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		pltfm_host->xfer_mode_shadow = val;
		return;
	case SDHCI_COMMAND:
		writel((val << 16) | pltfm_host->xfer_mode_shadow,
		       host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}

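/*
 * 32-bit register writes: mask spurious data timeout and CRC error
 * signalling, and toggle block gap interrupt detection on SoCs with
 * NVQUIRK_ENABLE_BLOCK_GAP_DET (see the erratum notes below).
 */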
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	/*
	 * Seems like we're getting spurious timeout and CRC errors, so
	 * disable signalling of them. In case of real errors, software
	 * timers should take care of eventually detecting them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
			(reg == SDHCI_INT_ENABLE))) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);

		if (val & SDHCI_INT_CARD_INT)
			gap_ctrl |= 0x8;
		else
			gap_ctrl &= ~0x8;
		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}

static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
{
	bool status;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	status = !!(reg & SDHCI_CLOCK_CARD_EN);

	if (status == enable)
		return status;

	if (enable)
		reg |= SDHCI_CLOCK_CARD_EN;
	else
		reg &= ~SDHCI_CLOCK_CARD_EN;

	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);

	return status;
}

static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	bool is_tuning_cmd = 0;
	bool clk_enabled;
	u8 cmd;

	if (reg == SDHCI_COMMAND) {
		cmd = SDHCI_GET_CMD(val);
		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
				cmd == MMC_SEND_TUNING_BLOCK_HS200;
	}

	if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);

	writew(val, host->ioaddr + reg);

	if (is_tuning_cmd) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, clk_enabled);
	}
}

static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}

static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int has_1v8, has_3v3;

	/*
	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
	 * voltage configuration in order to perform voltage switching. This
	 * means that valid pinctrl info is required on SDHCI instances capable
	 * of performing voltage switching. Whether or not an SDHCI instance is
	 * capable of voltage switching is determined based on the regulator.
	 */

	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return true;

	if (IS_ERR(host->mmc->supply.vqmmc))
		return false;

	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 1700000, 1950000);

	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 2700000, 3600000);

	if (has_1v8 == 1 && has_3v3 == 1)
		return tegra_host->pad_control_available;

	/* Fixed voltage, no pad control required. */
	return true;
}

static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	bool card_clk_enabled = false;
	u32 reg;

	/*
	 * Touching the tap values is a bit tricky on some SoC generations.
	 * The quirk enables a workaround for a glitch that sometimes occurs if
	 * the tap values are changed.
	 */

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
	    card_clk_enabled) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
	}
}

static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe)
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	else
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}

static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_reset(host, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}

static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
	u32 val;

	/*
	 * Enable or disable the additional I/O pad used by the drive strength
	 * calibration process.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
	else
		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;

	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		usleep_range(1, 2);
}

static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
					       u16 pdpu)
{
	u32 reg;

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
	reg |= pdpu;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}

static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
			&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			reg = sdhci_readl(host,
					  SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
				     SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}

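/*
 * Run automatic pad drive strength calibration: program the pull-up/pull-down
 * offsets for the current timing and signal voltage, start calibration with
 * the card clock gated and poll up to 10 ms for completion. On timeout,
 * disable auto-calibration and fall back to the fixed drive strengths set by
 * tegra_sdhci_set_padctrl().
 */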
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic calibration and use fixed drive strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}

static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *autocal =
			&tegra_host->autocal_offsets;
	int err;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-3v3",
			&autocal->pull_up_3v3);
	if (err)
		autocal->pull_up_3v3 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-3v3",
			&autocal->pull_down_3v3);
	if (err)
		autocal->pull_down_3v3 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-1v8",
			&autocal->pull_up_1v8);
	if (err)
		autocal->pull_up_1v8 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-1v8",
			&autocal->pull_down_1v8);
	if (err)
		autocal->pull_down_1v8 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-sdr104",
			&autocal->pull_up_sdr104);
	if (err)
		autocal->pull_up_sdr104 = autocal->pull_up_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-sdr104",
			&autocal->pull_down_sdr104);
	if (err)
		autocal->pull_down_sdr104 = autocal->pull_down_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-hs400",
			&autocal->pull_up_hs400);
	if (err)
		autocal->pull_up_hs400 = autocal->pull_up_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-hs400",
			&autocal->pull_down_hs400);
	if (err)
		autocal->pull_down_hs400 = autocal->pull_down_1v8;

	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
	 * So, avoid reading the device tree properties below for SoCs that
	 * don't have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			&autocal->pull_up_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_3v3_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
			&autocal->pull_down_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_3v3_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
			&autocal->pull_up_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_1v8_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
			&autocal->pull_down_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_1v8_timeout = 0;
	}
}

static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);

	/* 100 ms calibration interval is specified in the TRM */
	if (ktime_to_ms(since_calib) > 100) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->last_calib = ktime_get();
	}

	sdhci_request(mmc, mrq);
}

static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int err;

	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
				       &tegra_host->default_tap);
	if (err)
		tegra_host->default_tap = 0;

	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
				       &tegra_host->default_trim);
	if (err)
		tegra_host->default_trim = 0;

	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
				       &tegra_host->dqs_trim);
	if (err)
		tegra_host->dqs_trim = 0x11;
}

static void tegra_sdhci_parse_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
		tegra_host->enable_hwcq = true;
	else
		tegra_host->enable_hwcq = false;

	tegra_sdhci_parse_pad_autocal_dt(host);
	tegra_sdhci_parse_tap_and_trim(host);
}

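/*
 * Program the controller clock. In DDR50/52 modes the parent clock runs at
 * twice the bus clock so that the SDHCI divider always divides by two (see
 * the comment below). Any pad calibration request left pending by a voltage
 * switch is serviced once the new clock is in place.
 */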
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divide the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	tegra_host->curr_clk_rate = host_clk;
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}

static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_round_rate(pltfm_host->clk, UINT_MAX);
}

static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}

static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
	u32 reg;
	int err;

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);

	/* 1 ms sleep, 5 ms timeout */
	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
				 1000, 5000);
	if (err)
		dev_err(mmc_dev(host->mmc),
			"HS400 delay line calibration timed out\n");
}

static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract a good valid passing window by
	 * filtering out unwanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}

static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use in case no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when the HW tuning result contains a
		 * single edge and the tap is set at a fixed delay relative to
		 * the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * A partial window includes the edges of the tuning range.
		 * A merged window includes more taps, so the window width is
		 * higher than the upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}

static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	err = sdhci_execute_tuning(mmc, opcode);
	if (!err && !host->tuning_err)
		tegra_sdhci_post_tuning(host);

	return err;
}

static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes. */
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_HS400:
		set_dqs_trim = true;
		do_hs400_dll_cal = true;
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		tegra_host->ddr_signaling = true;
		set_default_tap = true;
		break;
	default:
		set_default_tap = true;
		break;
	}

	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);

	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;

	sdhci_set_uhs_signaling(host, timing);

	tegra_sdhci_pad_autocalib(host);

	if (tegra_host->tuned_tap_delay && !set_default_tap)
		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
	else
		tegra_sdhci_set_tap(host, tegra_host->default_tap);

	if (set_dqs_trim)
		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);

	if (do_hs400_dll_cal)
		tegra_sdhci_hs400_dll_cal(host);
}

static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned int min, max;

	/*
	 * Start search for minimum tap value at 10, as smaller values may
	 * wrongly be reported as working but fail at higher speeds,
	 * according to the TRM.
	 */
	min = 10;
	while (min < 255) {
		tegra_sdhci_set_tap(host, min);
		if (!mmc_send_tuning(host->mmc, opcode, NULL))
			break;
		min++;
	}

	/* Find the maximum tap value that still passes. */
	max = min + 1;
	while (max < 255) {
		tegra_sdhci_set_tap(host, max);
		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			max--;
			break;
		}
		max++;
	}

	/* The TRM states the ideal tap value is at 75% in the passing range. */
	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));

	return mmc_send_tuning(host->mmc, opcode, NULL);
}

static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int ret = 0;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
		if (ret < 0)
			return ret;
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
		if (ret < 0)
			return ret;
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
	}

	if (tegra_host->pad_calib_required)
		tegra_sdhci_pad_autocalib(host);

	return ret;
}

static int tegra_sdhci_init_pinctrl_info(struct device *dev,
					 struct sdhci_tegra *tegra_host)
{
	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
		dev_dbg(dev, "No pinctrl info, err: %ld\n",
			PTR_ERR(tegra_host->pinctrl_sdmmc));
		return -1;
	}

	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
			tegra_host->pinctrl_state_1v8_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
			tegra_host->pinctrl_state_3v3_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_3v3));
		return -1;
	}

	tegra_host->pinctrl_state_1v8 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_1v8));
		return -1;
	}

	tegra_host->pad_control_available = true;

	return 0;
}

static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		tegra_host->pad_calib_required = true;
}

static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	u8 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, the CQHCI driver unhalts CQE prior to the
	 * cqhci_host_ops enable, where the SDHCI DMA and BLOCK_SIZE registers
	 * need to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to the block size register
	 * when CQE is unhalted. So handle the CQE resume sequence here to
	 * configure the SDHCI block registers prior to exiting the CQE halt
	 * state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quickly, but in case Tegra CQE
		 * doesn't resume, retry the unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}

static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
					 struct mmc_request *mrq, u64 *data)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
	    mrq->cmd->flags & MMC_RSP_R1B)
		*data |= CQHCI_CMD_TIMING(1);
}

static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 val;

	/*
	 * The Tegra CQHCI/SDMMC design prevents write access to the sdhci
	 * block size register when CQE is enabled and unhalted.
	 * The CQHCI driver enables CQE prior to activation, so disable CQE
	 * before programming the block size in the sdhci controller and
	 * enable it back afterwards.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_cqe_enable(mmc);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are sometimes seen with some eMMC devices when the
	 * status command is sent during transfer of the last data block,
	 * which is the default case as the send status command block counter
	 * (CBC) is 1.
	 * The recommended fix is to set CBC to 0, allowing the send status
	 * command only when the data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}

static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	sdhci_dumpregs(mmc_priv(mmc));
}

static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

static void tegra_sdhci_set_timeout(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u32 val;

	/*
	 * HW busy detection timeout is based on the programmed data timeout
	 * counter and the maximum supported timeout is 11s, which may not be
	 * enough for long operations like cache flush, sleep awake, erase.
	 *
	 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows
	 * the host controller to wait for the busy state for as long as the
	 * card is busy, without a HW timeout.
	 *
	 * So, use infinite busy wait mode for operations that may take more
	 * than the maximum HW busy timeout of 11s; otherwise use finite busy
	 * wait mode.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	if (cmd && cmd->busy_timeout >= 11 * HZ)
		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	else
		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);

	__sdhci_set_timeout(host, cmd);
}

static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l = tegra_cqhci_writel,
	.enable = sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
};

static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *platform = sdhci_priv(host);
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
	struct device *dev = mmc_dev(host->mmc);

	if (soc->dma_mask)
		return dma_set_mask_and_coherent(dev, soc->dma_mask);

	return 0;
}

static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};

static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000
		    * even though no command operation was in progress."
		    *
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_PADCALIB,
};

static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
};

static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra210_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra210_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};

static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra186_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};

static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};

static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);

static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(host->mmc->parent,
			       sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
	if (!match)
		return -EINVAL;
	soc_data = match->data;

	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
	if (IS_ERR(host))
		return PTR_ERR(host);
	pltfm_host = sdhci_priv(host);

	tegra_host = sdhci_pltfm_priv(pltfm_host);
	tegra_host->ddr_signaling = false;
	tegra_host->pad_calib_required = false;
	tegra_host->pad_control_available = false;
	tegra_host->soc_data = soc_data;

	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
		if (rc == 0)
			host->mmc_host_ops.start_signal_voltage_switch =
				sdhci_tegra_start_signal_voltage_switch;
	}

	/* Hook to periodically rerun pad calibration */
	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		host->mmc_host_ops.request = tegra_sdhci_request;

	host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
				tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* HW busy detection is supported, but R1B responses are required. */
	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	tegra_sdhci_parse_dt(host);

	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(tegra_host->power_gpio)) {
		rc = PTR_ERR(tegra_host->power_gpio);
		goto err_power_req;
	}

	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		rc = PTR_ERR(clk);

		if (rc != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", rc);

		goto err_clk_get;
	}
	clk_prepare_enable(clk);
	pltfm_host->clk = clk;

	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
							   "sdhci");
	if (IS_ERR(tegra_host->rst)) {
		rc = PTR_ERR(tegra_host->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
		goto err_rst_get;
	}

	rc = reset_control_assert(tegra_host->rst);
	if (rc)
		goto err_rst_get;

	usleep_range(2000, 4000);

	rc = reset_control_deassert(tegra_host->rst);
	if (rc)
		goto err_rst_get;

	usleep_range(2000, 4000);

	rc = sdhci_tegra_add_host(host);
	if (rc)
		goto err_add_host;

	return 0;

err_add_host:
	reset_control_assert(tegra_host->rst);
err_rst_get:
	clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
err_power_req:
err_parse_dt:
	sdhci_pltfm_free(pdev);
	return rc;
}

static int sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);
	clk_disable_unprepare(pltfm_host->clk);

	sdhci_pltfm_free(pdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}

static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver = {
		.name = "sdhci-tegra",
		.of_match_table = sdhci_tegra_dt_match,
		.pm = &sdhci_tegra_dev_pm_ops,
	},
	.probe = sdhci_tegra_probe,
	.remove = sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");