// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_MCI_GENERICS		0x70
#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)

#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_BUS_FAIL	BIT(1)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define CORE_PWRCTL_IO_FAIL	BIT(3)
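/*
 * Power-control handshake (serviced in sdhci_msm_handle_pwr_irq()): the
 * controller raises CORE_PWRCTL_BUS_ON/OFF and CORE_PWRCTL_IO_HIGH/LOW
 * request bits in PWRCTL_STATUS; the driver clears them via PWRCTL_CLEAR,
 * services the request (regulators, pinctrl) and acknowledges it by
 * writing the matching SUCCESS/FAIL bits to PWRCTL_CTL. The REQ_* flags
 * below are the driver-internal encoding of the same four requests.
 */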
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf
#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)

#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define DLL_USR_CTL_POR_VAL	0x10800
#define ENABLE_DLL_LOCK_STATUS	BIT(26)
#define FINE_TUNE_MODE_EN	BIT(27)
#define BIAS_OK_SIGNAL		BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL	0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL	0x10

#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
#define CORE_CLK_PWRSAVE	BIT(1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN	BIT(15)
#define CORE_IO_PAD_PWR_SWITCH	BIT(16)
#define CORE_HC_SELECT_IN_EN	BIT(18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_3_0V_SUPPORT	BIT(25)
#define CORE_1_8V_SUPPORT	BIT(26)
#define CORE_VOLT_SUPPORT	(CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)

#define CORE_PWRSAVE_DLL	BIT(3)

#define DDR_CONFIG_POR_VAL	0x80040873

#define INVALID_TUNING_PHASE	-1
#define SDHCI_MSM_MIN_CLOCK	400000
#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT	20
#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT	24
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS	5000

/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA	325000

#define msm_host_readl(msm_host, host, offset) \
	msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
	msm_host->var_ops->msm_writel_relaxed(val, host, offset)

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1	0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN	(0x3 << 13)

struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
	.core_dll_usr_ctl = 0x388,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};

/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data_structure based on version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	void __iomem *ice_mem;	/* MSM ICE mapped address (if available) */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	/* core, iface, cal, sleep, and ice clocks */
	struct clk_bulk_data bulk_clks[5];
	unsigned long clk_rate;
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;
	u32 transfer_mode;
	bool updated_ddr_cfg;
	bool uses_tassadar_dll;
	u32 dll_config;
	u32 ddr_config;
	bool vqmmc_enabled;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;
	/*
	 * The SDHC requires the internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400 MHz) for some of its parts and
	 * sends the actual required clock (50/200 MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		return 2;
	return 1;
}

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	unsigned long achieved_rate;
	unsigned int desired_rate;
	unsigned int mult;
	int rc;

	mult = msm_get_clock_mult_for_bus_mode(host);
	desired_rate = clock * mult;
	rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
		return;
	}

	/*
	 * Qualcomm clock drivers by default round clock _up_ if they can't
	 * make the requested rate. This is not good for SD. Yell if we
	 * encounter it.
	 */
	achieved_rate = clk_get_rate(core_clk);
	if (achieved_rate > desired_rate)
		pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
			mmc_hostname(host->mmc), desired_rate, achieved_rate);
	host->mmc->actual_clock = achieved_rate / mult;

	/* Stash the rate we requested to use in sdhci_msm_runtime_resume() */
	msm_host->clk_rate = desired_rate;

	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
}
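/*
 * Example: for MMC_TIMING_MMC_DDR52 with a 52 MHz card clock, the
 * multiplier above is 2, so dev_pm_opp_set_rate() is asked for 104 MHz
 * and actual_clock is reported back as achieved_rate / 2.
 */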
/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
			       mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config) & CORE_CK_OUT_EN);
	}

	return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
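	/*
	 * The table above is the 4-bit binary-reflected Gray code for
	 * phases 0..15: consecutive entries differ in exactly one bit,
	 * which is presumably the encoding the CDR_SELEXT field of
	 * DLL_CONFIG expects when stepping between adjacent phases.
	 */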
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
	       mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
		       mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between two valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If the two valid windows form a cycle, merge them into one window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, then the
			 * total number of phases in both windows must be
			 * less than MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
			    ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
		       mmc_hostname(mmc), ret);
	}

	return ret;
}
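/*
 * Worked example (hypothetical tuning result): if the good phases were
 * {0, 1, 2, 3, 13, 14, 15}, they first split into rows {0,1,2,3} and
 * {13,14,15}. Since both phase 0 and phase 15 are present, the windows
 * wrap around and are merged into {13,14,15,0,1,2,3}. With curr_max = 7,
 * i = (7 * 3) / 4 - 1 = 4, so phase 1 (the 3/4 point of the merged
 * window) is selected.
 */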
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}
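/*
 * Example: a 200 MHz HS200 clock falls in the <= 200 MHz bucket, so
 * MCLK_FREQ = 7 is programmed into bits [26:24] (CMUX_SHIFT_PHASE) of
 * DLL_CONFIG; a 100 MHz clock would leave MCLK_FREQ = 0.
 */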
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	if (msm_host->dll_config)
		writel_relaxed(msm_host->dll_config,
				host->ioaddr + msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}
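	/*
	 * Illustrative FLL arithmetic (19.2 MHz is a typical TCXO rate on
	 * these SoCs): with host->clock = 192 MHz and CORE_FLL_CYCLE_CNT
	 * clear, mclk_freq = DIV_ROUND_CLOSEST_ULL(192000000 * 4, 19200000)
	 * = 40, which is programmed into bits [17:10] of DLL_CONFIG_2 above.
	 */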
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Configure DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config_3);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait of 50 us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
			       mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}

static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes. Write 0 to the HC_SELECT_IN and HC_SELECT_IN_EN fields
	 * in VENDOR_SPEC_FUNC.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
						msm_offset->core_dll_status,
						dll_lock,
						(dll_lock &
						(CORE_DLL_LOCK |
						CORE_DDR_DLL_LOCK)), 10,
						1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (Since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *		Initially SDR104 - when tuning is required as HS200
 *		Then when switching to DDR @ 400MHz (HS400) we use
 *		the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the core_ddr_config register defaults to the desired
	 * configuration on reset. We still reprogram the power-on reset
	 * (POR) value in case it might have been modified by the
	 * bootloaders. In the future, if this changes, then the desired
	 * values will need to be programmed appropriately.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at least 1us before DATA
	 * starts coming. Controllers with 14lpp and later tech DLL cannot
	 * guarantee the above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Drain writebuffer to ensure above DLL calibration
	 * and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
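/*
 * Summary of the two HS400 calibration paths selected above: hosts with
 * the CDCLP533 delay circuit (use_cdclp533) calibrate it via
 * sdhci_msm_cdclp533_calibration(), while other hosts train the DDR DLL
 * via sdhci_msm_cm_dll_sdc4_calibration(). Either way, sdhci_msm_hs400()
 * below runs the calibration only once per HS400 switch, gated by
 * msm_host->calibration_done.
 */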
static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * when the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios->timing == MMC_TIMING_MMC_HS400 ||
	    ios->timing == MMC_TIMING_MMC_HS200 ||
	    ios->timing == MMC_TIMING_UHS_SDR104) ||
	    ios->enhanced_strobe)
		return false;

	return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/*
	 * The SDR DLL comes into the picture only for timing modes which
	 * need tuning.
	 */
	if (!sdhci_msm_is_tuning_needed(host))
		return 0;

	/* Reset the tuning block */
	ret = msm_init_cm_dll(host);
	if (ret)
		return ret;

	/* Restore the tuning block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
	u32 config, oldconfig = readl_relaxed(host->ioaddr +
					      msm_offset->core_dll_config);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig) {
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_dll_config);
	}
}

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 10;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	/*
	 * HS400 tuning in HS200 timing requires:
	 * - select MCLK/2 in VENDOR_SPEC
	 * - program MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				 mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
			/*
			 * All phases valid is _almost_ as bad as no phases
			 * valid.  Probably all phases are not really reliable
			 * but we didn't detect where the unreliable place is.
			 * That means we'll essentially be guessing and hoping
			 * we get a good phase.  Better to try a few times.
			 */
			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
				mmc_hostname(mmc));
			if (--tuning_seq_cnt) {
				tuned_phase_cnt = 0;
				goto retry;
			}
		}

		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			 mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * The DLL is not required for clock <= 100MHz, so make
		 * sure it is disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
{
	struct platform_device *pdev = msm_host->pdev;
	int ret;

	if (level)
		ret = pinctrl_pm_select_default_state(&pdev->dev);
	else
		ret = pinctrl_pm_select_sleep_state(&pdev->dev);

	return ret;
}

static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
{
	if (IS_ERR(mmc->supply.vmmc))
		return 0;

	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}

static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
			    struct mmc_host *mmc, bool level)
{
	int ret;
	struct mmc_ios ios;

	if (msm_host->vqmmc_enabled == level)
		return 0;

	if (level) {
		/* Set the IO voltage regulator to default voltage level */
		if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
		else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;

		if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
			ret = mmc_regulator_set_vqmmc(mmc, &ios);
			if (ret < 0) {
				dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n",
					mmc_hostname(mmc), ret);
				goto out;
			}
		}
		ret = regulator_enable(mmc->supply.vqmmc);
	} else {
		ret = regulator_disable(mmc->supply.vqmmc);
	}

	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n",
			mmc_hostname(mmc), level ? "en" : "dis", ret);
	else
		msm_host->vqmmc_enabled = level;
out:
	return ret;
}

static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
				 struct mmc_host *mmc, bool hpm)
{
	int load, ret;

	load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
	ret = regulator_set_load(mmc->supply.vqmmc, load);
	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
			mmc_hostname(mmc), ret);
	return ret;
}
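/*
 * Note: setting the load to MMC_VQMMC_MAX_LOAD_UA (325 mA) above keeps
 * the regulator in its high-power mode, while a load of 0 typically lets
 * the regulator framework drop it into low-power mode; the supply itself
 * stays enabled either way.
 */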
static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
			       struct mmc_host *mmc, bool level)
{
	int ret;
	bool always_on;

	if (IS_ERR(mmc->supply.vqmmc) ||
	    (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
		return 0;
	/*
	 * For eMMC, don't turn off Vqmmc. Instead, just configure it in
	 * LPM and HPM modes by setting the corresponding load.
	 *
	 * Until eMMC is initialized (i.e. always_on == 0), just turn on/off
	 * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off
	 * gets invoked. Once eMMC is initialized (i.e. always_on == 1),
	 * Vqmmc should remain ON, so just set the load instead of turning it
	 * off/on.
	 */
	always_on = !mmc_card_is_removable(mmc) &&
			mmc->card && mmc_card_mmc(mmc->card);

	if (always_on)
		ret = msm_config_vqmmc_mode(msm_host, mmc, level);
	else
		ret = msm_toggle_vqmmc(msm_host, mmc, level);

	return ret;
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status API should be called when register writes
 * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
 * happen. The state to which the register writes will change the IO lines
 * should be passed as the argument req_type. This API will check whether
 * the IO line's state is already the expected state and will wait for the
 * power irq only if the power irq is expected to be triggered based on the
 * current and expected IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
		mmc_hostname(host->mmc),
		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	u32 irq_status, irq_ack = 0;
	int retry = 10, ret;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status register
	 * happen at the same time. Hence, retry at least 10 times to make
	 * sure the status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
					mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}

	if (pwr_state) {
		ret = sdhci_msm_set_vmmc(mmc);
		if (!ret)
			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_pincfg(msm_host,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
		else
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
	}

	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW)
		io_level = REQ_IO_LOW;

	if (irq_status & CORE_PWRCTL_IO_HIGH)
		io_level = REQ_IO_HIGH;

	if (io_level)
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
		ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
				mmc_hostname(mmc), ret,
				mmc->ios.signal_voltage, mmc->ios.vdd,
				irq_status);
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		}
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back if it succeeded or not to this register. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset IO PAD PWR switch only if the register write
		 * can set IO lines high and the regulator also switches to 3 V.
		 * Else, we should keep the IO PAD PWR switch set.
		 * This is applicable to certain targets where eMMC vccq supply
		 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
		 * IO PAD PWR switch must be kept set to reflect actual
		 * regulator voltage. This way, during initialization of
		 * controllers with only 1.8V, we will set the IO PAD bit
		 * without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
				(msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
				(msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}
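/*
 * clk_round_rate(core_clk, ULONG_MAX) asks the clock framework for the
 * highest rate the core clock can provide, which the sdhci core then
 * treats as the host's maximum supported clock.
 */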
1774 * Thus, read SDHCI_CLOCK_CONTROL and only enable the 1775 * clock, with no divider value programmed. 1776 */ 1777 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1778 sdhci_enable_clk(host, clk); 1779 } 1780 1781 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */ 1782 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) 1783 { 1784 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1785 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1786 1787 if (!clock) { 1788 host->mmc->actual_clock = msm_host->clk_rate = 0; 1789 goto out; 1790 } 1791 1792 sdhci_msm_hc_select_mode(host); 1793 1794 msm_set_clock_rate_for_bus_mode(host, clock); 1795 out: 1796 __sdhci_msm_set_clock(host, clock); 1797 } 1798 1799 /*****************************************************************************\ 1800 * * 1801 * Inline Crypto Engine (ICE) support * 1802 * * 1803 \*****************************************************************************/ 1804 1805 #ifdef CONFIG_MMC_CRYPTO 1806 1807 #define AES_256_XTS_KEY_SIZE 64 1808 1809 /* QCOM ICE registers */ 1810 1811 #define QCOM_ICE_REG_VERSION 0x0008 1812 1813 #define QCOM_ICE_REG_FUSE_SETTING 0x0010 1814 #define QCOM_ICE_FUSE_SETTING_MASK 0x1 1815 #define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 1816 #define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 1817 1818 #define QCOM_ICE_REG_BIST_STATUS 0x0070 1819 #define QCOM_ICE_BIST_STATUS_MASK 0xF0000000 1820 1821 #define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 1822 1823 #define sdhci_msm_ice_writel(host, val, reg) \ 1824 writel((val), (host)->ice_mem + (reg)) 1825 #define sdhci_msm_ice_readl(host, reg) \ 1826 readl((host)->ice_mem + (reg)) 1827 1828 static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host) 1829 { 1830 struct device *dev = mmc_dev(msm_host->mmc); 1831 u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION); 1832 int major = regval >> 24; 1833 int minor = (regval >> 16) & 0xFF; 1834 int step = regval & 0xFFFF; 1835 1836 /* For now this driver only supports ICE version 3. */ 1837 if (major != 3) { 1838 dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", 1839 major, minor, step); 1840 return false; 1841 } 1842 1843 dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", 1844 major, minor, step); 1845 1846 /* If fuses are blown, ICE might not work in the standard way. 
*/ 1847 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING); 1848 if (regval & (QCOM_ICE_FUSE_SETTING_MASK | 1849 QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | 1850 QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { 1851 dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); 1852 return false; 1853 } 1854 return true; 1855 } 1856 1857 static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev) 1858 { 1859 return devm_clk_get(dev, "ice"); 1860 } 1861 1862 static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, 1863 struct cqhci_host *cq_host) 1864 { 1865 struct mmc_host *mmc = msm_host->mmc; 1866 struct device *dev = mmc_dev(mmc); 1867 struct resource *res; 1868 1869 if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS)) 1870 return 0; 1871 1872 res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM, 1873 "ice"); 1874 if (!res) { 1875 dev_warn(dev, "ICE registers not found\n"); 1876 goto disable; 1877 } 1878 1879 if (!qcom_scm_ice_available()) { 1880 dev_warn(dev, "ICE SCM interface not found\n"); 1881 goto disable; 1882 } 1883 1884 msm_host->ice_mem = devm_ioremap_resource(dev, res); 1885 if (IS_ERR(msm_host->ice_mem)) 1886 return PTR_ERR(msm_host->ice_mem); 1887 1888 if (!sdhci_msm_ice_supported(msm_host)) 1889 goto disable; 1890 1891 mmc->caps2 |= MMC_CAP2_CRYPTO; 1892 return 0; 1893 1894 disable: 1895 dev_warn(dev, "Disabling inline encryption support\n"); 1896 return 0; 1897 } 1898 1899 static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host) 1900 { 1901 u32 regval; 1902 1903 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL); 1904 /* 1905 * Enable low power mode sequence 1906 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0 1907 */ 1908 regval |= 0x7000; 1909 sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); 1910 } 1911 1912 static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host) 1913 { 1914 u32 regval; 1915 1916 /* ICE Optimizations Enable Sequence */ 1917 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL); 1918 regval |= 0xD807100; 1919 /* ICE HPG requires delay before writing */ 1920 udelay(5); 1921 sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); 1922 udelay(5); 1923 } 1924 1925 /* 1926 * Wait until the ICE BIST (built-in self-test) has completed. 1927 * 1928 * This may be necessary before ICE can be used. 1929 * 1930 * Note that we don't really care whether the BIST passed or failed; we really 1931 * just want to make sure that it isn't still running. This is because (a) the 1932 * BIST is a FIPS compliance thing that never fails in practice, (b) ICE is 1933 * documented to reject crypto requests if the BIST fails, so we needn't do it 1934 * in software too, and (c) properly testing storage encryption requires testing 1935 * the full storage stack anyway, and not relying on hardware-level self-tests. 
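* (The poll below re-reads BIST_STATUS every 50 us and gives up after 5000 us; see the readl_poll_timeout() arguments.)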
1936 */ 1937 static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host) 1938 { 1939 u32 regval; 1940 int err; 1941 1942 err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS, 1943 regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), 1944 50, 5000); 1945 if (err) 1946 dev_err(mmc_dev(msm_host->mmc), 1947 "Timed out waiting for ICE self-test to complete\n"); 1948 return err; 1949 } 1950 1951 static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host) 1952 { 1953 if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)) 1954 return; 1955 sdhci_msm_ice_low_power_mode_enable(msm_host); 1956 sdhci_msm_ice_optimization_enable(msm_host); 1957 sdhci_msm_ice_wait_bist_status(msm_host); 1958 } 1959 1960 static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) 1961 { 1962 if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)) 1963 return 0; 1964 return sdhci_msm_ice_wait_bist_status(msm_host); 1965 } 1966 1967 /* 1968 * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires 1969 * vendor-specific SCM calls for this; it doesn't support the standard way. 1970 */ 1971 static int sdhci_msm_program_key(struct cqhci_host *cq_host, 1972 const union cqhci_crypto_cfg_entry *cfg, 1973 int slot) 1974 { 1975 struct device *dev = mmc_dev(cq_host->mmc); 1976 union cqhci_crypto_cap_entry cap; 1977 union { 1978 u8 bytes[AES_256_XTS_KEY_SIZE]; 1979 u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; 1980 } key; 1981 int i; 1982 int err; 1983 1984 if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)) 1985 return qcom_scm_ice_invalidate_key(slot); 1986 1987 /* Only AES-256-XTS has been tested so far. */ 1988 cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx]; 1989 if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS || 1990 cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) { 1991 dev_err_ratelimited(dev, 1992 "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", 1993 cap.algorithm_id, cap.key_size); 1994 return -EINVAL; 1995 } 1996 1997 memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE); 1998 1999 /* 2000 * The SCM call byte-swaps the 32-bit words of the key. So we have to 2001 * do the same, in order for the final key to be correct. 
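* For example, on a little-endian kernel __cpu_to_be32s() turns the word 0x00010203 into 0x03020100 in place, and the SCM call's own swap then restores the original byte order.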
2002 */ 2003 for (i = 0; i < ARRAY_SIZE(key.words); i++) 2004 __cpu_to_be32s(&key.words[i]); 2005 2006 err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, 2007 QCOM_SCM_ICE_CIPHER_AES_256_XTS, 2008 cfg->data_unit_size); 2009 memzero_explicit(&key, sizeof(key)); 2010 return err; 2011 } 2012 #else /* CONFIG_MMC_CRYPTO */ 2013 static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev) 2014 { 2015 return NULL; 2016 } 2017 2018 static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, 2019 struct cqhci_host *cq_host) 2020 { 2021 return 0; 2022 } 2023 2024 static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host) 2025 { 2026 } 2027 2028 static inline int __maybe_unused 2029 sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) 2030 { 2031 return 0; 2032 } 2033 #endif /* !CONFIG_MMC_CRYPTO */ 2034 2035 /*****************************************************************************\ 2036 * * 2037 * MSM Command Queue Engine (CQE) * 2038 * * 2039 \*****************************************************************************/ 2040 2041 static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) 2042 { 2043 int cmd_error = 0; 2044 int data_error = 0; 2045 2046 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 2047 return intmask; 2048 2049 cqhci_irq(host->mmc, intmask, cmd_error, data_error); 2050 return 0; 2051 } 2052 2053 static void sdhci_msm_cqe_enable(struct mmc_host *mmc) 2054 { 2055 struct sdhci_host *host = mmc_priv(mmc); 2056 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2057 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2058 2059 sdhci_cqe_enable(mmc); 2060 sdhci_msm_ice_enable(msm_host); 2061 } 2062 2063 static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) 2064 { 2065 struct sdhci_host *host = mmc_priv(mmc); 2066 unsigned long flags; 2067 u32 ctrl; 2068 2069 /* 2070 * When CQE is halted, the legacy SDHCI path operates only 2071 * on 16-byte descriptors in 64-bit mode. 2072 */ 2073 if (host->flags & SDHCI_USE_64_BIT_DMA) 2074 host->desc_sz = 16; 2075 2076 spin_lock_irqsave(&host->lock, flags); 2077 2078 /* 2079 * During CQE command transfers, the command complete bit gets latched. 2080 * So s/w should clear the command complete interrupt status when CQE is 2081 * either halted or disabled. Otherwise an unexpected SDHCI legacy 2082 * interrupt gets triggered when CQE is halted/disabled. 2083 */ 2084 ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); 2085 ctrl |= SDHCI_INT_RESPONSE; 2086 sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); 2087 sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); 2088 2089 spin_unlock_irqrestore(&host->lock, flags); 2090 2091 sdhci_cqe_disable(mmc, recovery); 2092 } 2093 2094 static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 2095 { 2096 u32 count, start = 15; 2097 2098 __sdhci_set_timeout(host, cmd); 2099 count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL); 2100 /* 2101 * Update the software timeout value if it is less than the hardware data 2102 * timeout value. The Qcom SoC hardware data timeout value is calculated 2103 * as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock. 
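* For example, at host->clock = 50 MHz and count = 14 the hardware timeout is 4 * 2^(14 + 13) / 50e6 ≈ 10.7 s, and since (1 << (14 + 15)) > 10 * 50e6, the software timeout below is raised to 22 s.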
2104 */ 2105 if (cmd && cmd->data && host->clock > 400000 && 2106 host->clock <= 50000000 && 2107 ((1 << (count + start)) > (10 * host->clock))) 2108 host->data_timeout = 22LL * NSEC_PER_SEC; 2109 } 2110 2111 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { 2112 .enable = sdhci_msm_cqe_enable, 2113 .disable = sdhci_msm_cqe_disable, 2114 #ifdef CONFIG_MMC_CRYPTO 2115 .program_key = sdhci_msm_program_key, 2116 #endif 2117 }; 2118 2119 static int sdhci_msm_cqe_add_host(struct sdhci_host *host, 2120 struct platform_device *pdev) 2121 { 2122 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2123 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2124 struct cqhci_host *cq_host; 2125 bool dma64; 2126 u32 cqcfg; 2127 int ret; 2128 2129 /* 2130 * When CQE is halted, the SDHC operates only on 16-byte ADMA descriptors. 2131 * So ensure the ADMA table is allocated for 16-byte descriptors. 2132 */ 2133 if (host->caps & SDHCI_CAN_64BIT) 2134 host->alloc_desc_sz = 16; 2135 2136 ret = sdhci_setup_host(host); 2137 if (ret) 2138 return ret; 2139 2140 cq_host = cqhci_pltfm_init(pdev); 2141 if (IS_ERR(cq_host)) { 2142 ret = PTR_ERR(cq_host); 2143 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); 2144 goto cleanup; 2145 } 2146 2147 msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 2148 cq_host->ops = &sdhci_msm_cqhci_ops; 2149 2150 dma64 = host->flags & SDHCI_USE_64_BIT_DMA; 2151 2152 ret = sdhci_msm_ice_init(msm_host, cq_host); 2153 if (ret) 2154 goto cleanup; 2155 2156 ret = cqhci_init(cq_host, host->mmc, dma64); 2157 if (ret) { 2158 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", 2159 mmc_hostname(host->mmc), ret); 2160 goto cleanup; 2161 } 2162 2163 /* Disable CQE reset due to the CQE enable signal */ 2164 cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); 2165 cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; 2166 cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); 2167 2168 /* 2169 * The SDHC expects 12-byte ADMA descriptors until CQE is enabled. 2170 * So limit desc_sz to 12 so that the data commands that are sent 2171 * during card initialization (before CQE gets enabled) are 2172 * executed without any issues. 2173 */ 2174 if (host->flags & SDHCI_USE_64_BIT_DMA) 2175 host->desc_sz = 12; 2176 2177 ret = __sdhci_add_host(host); 2178 if (ret) 2179 goto cleanup; 2180 2181 dev_info(&pdev->dev, "%s: CQE init: success\n", 2182 mmc_hostname(host->mmc)); 2183 return ret; 2184 2185 cleanup: 2186 sdhci_cleanup_host(host); 2187 return ret; 2188 } 2189 2190 /* 2191 * Platform specific register write functions. This is so that, if any 2192 * register write needs to be followed up by platform specific actions, 2193 * they can be added here. These functions can go to sleep when writes 2194 * to certain registers are done. 2195 * These functions rely on sdhci_set_ios not using a spinlock. 2196 */ 2197 static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) 2198 { 2199 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2200 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2201 u32 req_type = 0; 2202 2203 switch (reg) { 2204 case SDHCI_HOST_CONTROL2: 2205 req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW : 2206 REQ_IO_HIGH; 2207 break; 2208 case SDHCI_SOFTWARE_RESET: 2209 if (host->pwr && (val & SDHCI_RESET_ALL)) 2210 req_type = REQ_BUS_OFF; 2211 break; 2212 case SDHCI_POWER_CONTROL: 2213 req_type = !val ? 
REQ_BUS_OFF : REQ_BUS_ON; 2214 break; 2215 case SDHCI_TRANSFER_MODE: 2216 msm_host->transfer_mode = val; 2217 break; 2218 case SDHCI_COMMAND: 2219 if (!msm_host->use_cdr) 2220 break; 2221 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && 2222 !mmc_op_tuning(SDHCI_GET_CMD(val))) 2223 sdhci_msm_set_cdr(host, true); 2224 else 2225 sdhci_msm_set_cdr(host, false); 2226 break; 2227 } 2228 2229 if (req_type) { 2230 msm_host->pwr_irq_flag = 0; 2231 /* 2232 * Since this register write may trigger a power irq, ensure 2233 * all previous register writes are complete by this point. 2234 */ 2235 mb(); 2236 } 2237 return req_type; 2238 } 2239 2240 /* This function may sleep */ 2241 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) 2242 { 2243 u32 req_type = 0; 2244 2245 req_type = __sdhci_msm_check_write(host, val, reg); 2246 writew_relaxed(val, host->ioaddr + reg); 2247 2248 if (req_type) 2249 sdhci_msm_check_power_status(host, req_type); 2250 } 2251 2252 /* This function may sleep */ 2253 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) 2254 { 2255 u32 req_type = 0; 2256 2257 req_type = __sdhci_msm_check_write(host, val, reg); 2258 2259 writeb_relaxed(val, host->ioaddr + reg); 2260 2261 if (req_type) 2262 sdhci_msm_check_power_status(host, req_type); 2263 } 2264 2265 static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host) 2266 { 2267 struct mmc_host *mmc = msm_host->mmc; 2268 struct regulator *supply = mmc->supply.vqmmc; 2269 u32 caps = 0, config; 2270 struct sdhci_host *host = mmc_priv(mmc); 2271 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 2272 2273 if (!IS_ERR(mmc->supply.vqmmc)) { 2274 if (regulator_is_supported_voltage(supply, 1700000, 1950000)) 2275 caps |= CORE_1_8V_SUPPORT; 2276 if (regulator_is_supported_voltage(supply, 2700000, 3600000)) 2277 caps |= CORE_3_0V_SUPPORT; 2278 2279 if (!caps) 2280 pr_warn("%s: 1.8/3V not supported for vqmmc\n", 2281 mmc_hostname(mmc)); 2282 } 2283 2284 if (caps) { 2285 /* 2286 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH 2287 * bit can be used as required later on. 2288 */ 2289 u32 io_level = msm_host->curr_io_level; 2290 2291 config = readl_relaxed(host->ioaddr + 2292 msm_offset->core_vendor_spec); 2293 config |= CORE_IO_PAD_PWR_SWITCH_EN; 2294 2295 if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT)) 2296 config &= ~CORE_IO_PAD_PWR_SWITCH; 2297 else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT)) 2298 config |= CORE_IO_PAD_PWR_SWITCH; 2299 2300 writel_relaxed(config, 2301 host->ioaddr + msm_offset->core_vendor_spec); 2302 } 2303 msm_host->caps_0 |= caps; 2304 pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps); 2305 } 2306 2307 static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host) 2308 { 2309 int ret; 2310 2311 ret = mmc_regulator_get_supply(msm_host->mmc); 2312 if (ret) 2313 return ret; 2314 2315 sdhci_msm_set_regulator_caps(msm_host); 2316 2317 return 0; 2318 } 2319 2320 static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc, 2321 struct mmc_ios *ios) 2322 { 2323 struct sdhci_host *host = mmc_priv(mmc); 2324 u16 ctrl, status; 2325 2326 /* 2327 * Signal Voltage Switching is only applicable for Host Controllers 2328 * v3.00 and above. 
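* The switch is requested via the 1.8V Signal Enable bit in Host Control 2; after a 5 ms stabilization window the bit is read back below to confirm it stuck.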
2329 */ 2330 if (host->version < SDHCI_SPEC_300) 2331 return 0; 2332 2333 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2334 2335 switch (ios->signal_voltage) { 2336 case MMC_SIGNAL_VOLTAGE_330: 2337 if (!(host->flags & SDHCI_SIGNALING_330)) 2338 return -EINVAL; 2339 2340 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2341 ctrl &= ~SDHCI_CTRL_VDD_180; 2342 break; 2343 case MMC_SIGNAL_VOLTAGE_180: 2344 if (!(host->flags & SDHCI_SIGNALING_180)) 2345 return -EINVAL; 2346 2347 /* Enable 1.8V Signal Enable in the Host Control2 register */ 2348 ctrl |= SDHCI_CTRL_VDD_180; 2349 break; 2350 2351 default: 2352 return -EINVAL; 2353 } 2354 2355 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2356 2357 /* Wait for 5ms */ 2358 usleep_range(5000, 5500); 2359 2360 /* regulator output should be stable within 5 ms */ 2361 status = ctrl & SDHCI_CTRL_VDD_180; 2362 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2363 if ((ctrl & SDHCI_CTRL_VDD_180) == status) 2364 return 0; 2365 2366 dev_warn(mmc_dev(mmc), "%s: Regulator output did not become stable\n", 2367 mmc_hostname(mmc)); 2368 2369 return -EAGAIN; 2370 } 2371 2372 #define DRIVER_NAME "sdhci_msm" 2373 #define SDHCI_MSM_DUMP(f, x...) \ 2374 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 2375 2376 static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) 2377 { 2378 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2379 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2380 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 2381 2382 SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n"); 2383 2384 SDHCI_MSM_DUMP( 2385 "DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n", 2386 readl_relaxed(host->ioaddr + msm_offset->core_dll_status), 2387 readl_relaxed(host->ioaddr + msm_offset->core_dll_config), 2388 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2)); 2389 SDHCI_MSM_DUMP( 2390 "DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n", 2391 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3), 2392 readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl), 2393 readl_relaxed(host->ioaddr + msm_offset->core_ddr_config)); 2394 SDHCI_MSM_DUMP( 2395 "Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n", 2396 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec), 2397 readl_relaxed(host->ioaddr + 2398 msm_offset->core_vendor_spec_func2), 2399 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3)); 2400 } 2401 2402 static const struct sdhci_msm_variant_ops mci_var_ops = { 2403 .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed, 2404 .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed, 2405 }; 2406 2407 static const struct sdhci_msm_variant_ops v5_var_ops = { 2408 .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed, 2409 .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed, 2410 }; 2411 2412 static const struct sdhci_msm_variant_info sdhci_msm_mci_var = { 2413 .var_ops = &mci_var_ops, 2414 .offset = &sdhci_msm_mci_offset, 2415 }; 2416 2417 static const struct sdhci_msm_variant_info sdhci_msm_v5_var = { 2418 .mci_removed = true, 2419 .var_ops = &v5_var_ops, 2420 .offset = &sdhci_msm_v5_offset, 2421 }; 2422 2423 static const struct sdhci_msm_variant_info sdm845_sdhci_var = { 2424 .mci_removed = true, 2425 .restore_dll_config = true, 2426 .var_ops = &v5_var_ops, 2427 .offset = &sdhci_msm_v5_offset, 2428 }; 2429 2430 static const struct of_device_id sdhci_msm_dt_match[] = { 2431 /* 2432 * Do not add new variants to the 
driver which are compatible with 2433 * generic ones, unless they need customization. 2434 */ 2435 {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var}, 2436 {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var}, 2437 {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var}, 2438 {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var}, 2439 {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var}, 2440 {}, 2441 }; 2442 2443 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); 2444 2445 static const struct sdhci_ops sdhci_msm_ops = { 2446 .reset = sdhci_and_cqhci_reset, 2447 .set_clock = sdhci_msm_set_clock, 2448 .get_min_clock = sdhci_msm_get_min_clock, 2449 .get_max_clock = sdhci_msm_get_max_clock, 2450 .set_bus_width = sdhci_set_bus_width, 2451 .set_uhs_signaling = sdhci_msm_set_uhs_signaling, 2452 .write_w = sdhci_msm_writew, 2453 .write_b = sdhci_msm_writeb, 2454 .irq = sdhci_msm_cqe_irq, 2455 .dump_vendor_regs = sdhci_msm_dump_vendor_regs, 2456 .set_power = sdhci_set_power_noreg, 2457 .set_timeout = sdhci_msm_set_timeout, 2458 }; 2459 2460 static const struct sdhci_pltfm_data sdhci_msm_pdata = { 2461 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | 2462 SDHCI_QUIRK_SINGLE_POWER_WRITE | 2463 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | 2464 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, 2465 2466 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 2467 .ops = &sdhci_msm_ops, 2468 }; 2469 2470 static inline void sdhci_msm_get_of_property(struct platform_device *pdev, 2471 struct sdhci_host *host) 2472 { 2473 struct device_node *node = pdev->dev.of_node; 2474 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2475 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2476 2477 if (of_property_read_u32(node, "qcom,ddr-config", 2478 &msm_host->ddr_config)) 2479 msm_host->ddr_config = DDR_CONFIG_POR_VAL; 2480 2481 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); 2482 2483 if (of_device_is_compatible(node, "qcom,msm8916-sdhci")) 2484 host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; 2485 } 2486 2487 static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) 2488 { 2489 struct reset_control *reset; 2490 int ret = 0; 2491 2492 reset = reset_control_get_optional_exclusive(dev, NULL); 2493 if (IS_ERR(reset)) 2494 return dev_err_probe(dev, PTR_ERR(reset), 2495 "unable to acquire core_reset\n"); 2496 2497 if (!reset) 2498 return ret; 2499 2500 ret = reset_control_assert(reset); 2501 if (ret) { 2502 reset_control_put(reset); 2503 return dev_err_probe(dev, ret, "core_reset assert failed\n"); 2504 } 2505 2506 /* 2507 * The hardware requirement for the delay between assert/deassert 2508 * is at least 3-4 sleep clock (32.7 kHz) cycles, which comes to 2509 * ~125us (4/32768). To be on the safe side, add a 200us delay. 
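* (4 / 32768 Hz ≈ 122 us, so 200 us gives roughly a 1.6x margin over the worst-case 4-cycle requirement.)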
2510 */ 2511 usleep_range(200, 210); 2512 2513 ret = reset_control_deassert(reset); 2514 if (ret) { 2515 reset_control_put(reset); 2516 return dev_err_probe(dev, ret, "core_reset deassert failed\n"); 2517 } 2518 2519 usleep_range(200, 210); 2520 reset_control_put(reset); 2521 2522 return ret; 2523 } 2524 2525 static int sdhci_msm_probe(struct platform_device *pdev) 2526 { 2527 struct sdhci_host *host; 2528 struct sdhci_pltfm_host *pltfm_host; 2529 struct sdhci_msm_host *msm_host; 2530 struct clk *clk; 2531 int ret; 2532 u16 host_version, core_minor; 2533 u32 core_version, config; 2534 u8 core_major; 2535 const struct sdhci_msm_offset *msm_offset; 2536 const struct sdhci_msm_variant_info *var_info; 2537 struct device_node *node = pdev->dev.of_node; 2538 2539 host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); 2540 if (IS_ERR(host)) 2541 return PTR_ERR(host); 2542 2543 host->sdma_boundary = 0; 2544 pltfm_host = sdhci_priv(host); 2545 msm_host = sdhci_pltfm_priv(pltfm_host); 2546 msm_host->mmc = host->mmc; 2547 msm_host->pdev = pdev; 2548 2549 ret = mmc_of_parse(host->mmc); 2550 if (ret) 2551 goto pltfm_free; 2552 2553 /* 2554 * Based on the compatible string, load the required msm host info from 2555 * the data associated with the version info. 2556 */ 2557 var_info = of_device_get_match_data(&pdev->dev); 2558 2559 msm_host->mci_removed = var_info->mci_removed; 2560 msm_host->restore_dll_config = var_info->restore_dll_config; 2561 msm_host->var_ops = var_info->var_ops; 2562 msm_host->offset = var_info->offset; 2563 2564 msm_offset = msm_host->offset; 2565 2566 sdhci_get_of_property(pdev); 2567 sdhci_msm_get_of_property(pdev, host); 2568 2569 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; 2570 2571 ret = sdhci_msm_gcc_reset(&pdev->dev, host); 2572 if (ret) 2573 goto pltfm_free; 2574 2575 /* Setup SDCC bus voter clock. */ 2576 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); 2577 if (!IS_ERR(msm_host->bus_clk)) { 2578 /* Vote for max. clk rate for max. 
performance */ 2579 ret = clk_set_rate(msm_host->bus_clk, INT_MAX); 2580 if (ret) 2581 goto pltfm_free; 2582 ret = clk_prepare_enable(msm_host->bus_clk); 2583 if (ret) 2584 goto pltfm_free; 2585 } 2586 2587 /* Setup main peripheral bus clock */ 2588 clk = devm_clk_get(&pdev->dev, "iface"); 2589 if (IS_ERR(clk)) { 2590 ret = PTR_ERR(clk); 2591 dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); 2592 goto bus_clk_disable; 2593 } 2594 msm_host->bulk_clks[1].clk = clk; 2595 2596 /* Setup SDC MMC clock */ 2597 clk = devm_clk_get(&pdev->dev, "core"); 2598 if (IS_ERR(clk)) { 2599 ret = PTR_ERR(clk); 2600 dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); 2601 goto bus_clk_disable; 2602 } 2603 msm_host->bulk_clks[0].clk = clk; 2604 2605 /* Check for optional interconnect paths */ 2606 ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL); 2607 if (ret) 2608 goto bus_clk_disable; 2609 2610 ret = devm_pm_opp_set_clkname(&pdev->dev, "core"); 2611 if (ret) 2612 goto bus_clk_disable; 2613 2614 /* OPP table is optional */ 2615 ret = devm_pm_opp_of_add_table(&pdev->dev); 2616 if (ret && ret != -ENODEV) { 2617 dev_err(&pdev->dev, "Invalid OPP table in device tree\n"); 2618 goto bus_clk_disable; 2619 } 2620 2621 /* Vote for maximum clock rate for maximum performance */ 2622 ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX); 2623 if (ret) 2624 dev_warn(&pdev->dev, "core clock boost failed\n"); 2625 2626 clk = devm_clk_get(&pdev->dev, "cal"); 2627 if (IS_ERR(clk)) 2628 clk = NULL; 2629 msm_host->bulk_clks[2].clk = clk; 2630 2631 clk = devm_clk_get(&pdev->dev, "sleep"); 2632 if (IS_ERR(clk)) 2633 clk = NULL; 2634 msm_host->bulk_clks[3].clk = clk; 2635 2636 clk = sdhci_msm_ice_get_clk(&pdev->dev); 2637 if (IS_ERR(clk)) 2638 clk = NULL; 2639 msm_host->bulk_clks[4].clk = clk; 2640 2641 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), 2642 msm_host->bulk_clks); 2643 if (ret) 2644 goto bus_clk_disable; 2645 2646 /* 2647 * The xo clock is needed for the FLL feature of cm_dll. 2648 * If the xo clock is not specified in DT, warn and proceed. 
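* (The error is only logged; probing continues, and users of the clock are expected to check IS_ERR(msm_host->xo_clk) before using it — an assumption based on the warn-and-proceed handling below.)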
2649 */ 2650 msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo"); 2651 if (IS_ERR(msm_host->xo_clk)) { 2652 ret = PTR_ERR(msm_host->xo_clk); 2653 dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); 2654 } 2655 2656 if (!msm_host->mci_removed) { 2657 msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); 2658 if (IS_ERR(msm_host->core_mem)) { 2659 ret = PTR_ERR(msm_host->core_mem); 2660 goto clk_disable; 2661 } 2662 } 2663 2664 /* Reset the vendor spec register to the power-on reset state */ 2665 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL, 2666 host->ioaddr + msm_offset->core_vendor_spec); 2667 2668 if (!msm_host->mci_removed) { 2669 /* Set the HC_MODE_EN bit in the HC_MODE register */ 2670 msm_host_writel(msm_host, HC_MODE_EN, host, 2671 msm_offset->core_hc_mode); 2672 config = msm_host_readl(msm_host, host, 2673 msm_offset->core_hc_mode); 2674 config |= FF_CLK_SW_RST_DIS; 2675 msm_host_writel(msm_host, config, host, 2676 msm_offset->core_hc_mode); 2677 } 2678 2679 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); 2680 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", 2681 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> 2682 SDHCI_VENDOR_VER_SHIFT)); 2683 2684 core_version = msm_host_readl(msm_host, host, 2685 msm_offset->core_mci_version); 2686 core_major = (core_version & CORE_VERSION_MAJOR_MASK) >> 2687 CORE_VERSION_MAJOR_SHIFT; 2688 core_minor = core_version & CORE_VERSION_MINOR_MASK; 2689 dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n", 2690 core_version, core_major, core_minor); 2691 2692 if (core_major == 1 && core_minor >= 0x42) 2693 msm_host->use_14lpp_dll_reset = true; 2694 2695 /* 2696 * SDCC 5 controllers with major version 1, minor version 0x34 and later 2697 * with HS400 mode support will use CM DLL instead of the CDC LP 533 DLL. 2698 */ 2699 if (core_major == 1 && core_minor < 0x34) 2700 msm_host->use_cdclp533 = true; 2701 2702 /* 2703 * Support for some capabilities is not advertised by newer 2704 * controller versions and must be explicitly enabled. 2705 */ 2706 if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) { 2707 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); 2708 config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT; 2709 writel_relaxed(config, host->ioaddr + 2710 msm_offset->core_vendor_spec_capabilities0); 2711 } 2712 2713 if (core_major == 1 && core_minor >= 0x49) 2714 msm_host->updated_ddr_cfg = true; 2715 2716 if (core_major == 1 && core_minor >= 0x71) 2717 msm_host->uses_tassadar_dll = true; 2718 2719 ret = sdhci_msm_register_vreg(msm_host); 2720 if (ret) 2721 goto clk_disable; 2722 2723 /* 2724 * The power-on reset state may trigger a power irq if the previous 2725 * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling 2726 * the pwr irq interrupt in the GIC, any pending power irq interrupt 2727 * should be acknowledged. Otherwise the power irq interrupt handler 2728 * would fire prematurely. 2729 */ 2730 sdhci_msm_handle_pwr_irq(host, 0); 2731 2732 /* 2733 * Ensure that the above writes are propagated before interrupt 2734 * enablement in the GIC. 
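* (The mb() that follows orders the relaxed register writes above ahead of the pwr_irq setup and the devm_request_threaded_irq() call below.)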
2735 */ 2736 mb(); 2737 2738 /* Setup IRQ for handling power/voltage tasks with PMIC */ 2739 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); 2740 if (msm_host->pwr_irq < 0) { 2741 ret = msm_host->pwr_irq; 2742 goto clk_disable; 2743 } 2744 2745 sdhci_msm_init_pwr_irq_wait(msm_host); 2746 /* Enable pwr irq interrupts */ 2747 msm_host_writel(msm_host, INT_MASK, host, 2748 msm_offset->core_pwrctl_mask); 2749 2750 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, 2751 sdhci_msm_pwr_irq, IRQF_ONESHOT, 2752 dev_name(&pdev->dev), host); 2753 if (ret) { 2754 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); 2755 goto clk_disable; 2756 } 2757 2758 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; 2759 2760 /* Set the timeout value to max possible */ 2761 host->max_timeout_count = 0xF; 2762 2763 pm_runtime_get_noresume(&pdev->dev); 2764 pm_runtime_set_active(&pdev->dev); 2765 pm_runtime_enable(&pdev->dev); 2766 pm_runtime_set_autosuspend_delay(&pdev->dev, 2767 MSM_MMC_AUTOSUSPEND_DELAY_MS); 2768 pm_runtime_use_autosuspend(&pdev->dev); 2769 2770 host->mmc_host_ops.start_signal_voltage_switch = 2771 sdhci_msm_start_signal_voltage_switch; 2772 host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; 2773 if (of_property_read_bool(node, "supports-cqe")) 2774 ret = sdhci_msm_cqe_add_host(host, pdev); 2775 else 2776 ret = sdhci_add_host(host); 2777 if (ret) 2778 goto pm_runtime_disable; 2779 2780 pm_runtime_mark_last_busy(&pdev->dev); 2781 pm_runtime_put_autosuspend(&pdev->dev); 2782 2783 return 0; 2784 2785 pm_runtime_disable: 2786 pm_runtime_disable(&pdev->dev); 2787 pm_runtime_set_suspended(&pdev->dev); 2788 pm_runtime_put_noidle(&pdev->dev); 2789 clk_disable: 2790 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2791 msm_host->bulk_clks); 2792 bus_clk_disable: 2793 if (!IS_ERR(msm_host->bus_clk)) 2794 clk_disable_unprepare(msm_host->bus_clk); 2795 pltfm_free: 2796 sdhci_pltfm_free(pdev); 2797 return ret; 2798 } 2799 2800 static int sdhci_msm_remove(struct platform_device *pdev) 2801 { 2802 struct sdhci_host *host = platform_get_drvdata(pdev); 2803 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2804 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2805 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 2806 0xffffffff); 2807 2808 sdhci_remove_host(host, dead); 2809 2810 pm_runtime_get_sync(&pdev->dev); 2811 pm_runtime_disable(&pdev->dev); 2812 pm_runtime_put_noidle(&pdev->dev); 2813 2814 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2815 msm_host->bulk_clks); 2816 if (!IS_ERR(msm_host->bus_clk)) 2817 clk_disable_unprepare(msm_host->bus_clk); 2818 sdhci_pltfm_free(pdev); 2819 return 0; 2820 } 2821 2822 static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev) 2823 { 2824 struct sdhci_host *host = dev_get_drvdata(dev); 2825 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2826 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2827 2828 /* Drop the performance vote */ 2829 dev_pm_opp_set_rate(dev, 0); 2830 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2831 msm_host->bulk_clks); 2832 2833 return 0; 2834 } 2835 2836 static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) 2837 { 2838 struct sdhci_host *host = dev_get_drvdata(dev); 2839 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2840 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2841 int ret; 2842 2843 ret = 
clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), 2844 msm_host->bulk_clks); 2845 if (ret) 2846 return ret; 2847 /* 2848 * Whenever the core clock is gated dynamically, the SDR DLL settings 2849 * must be restored when the clock is ungated. 2850 */ 2851 if (msm_host->restore_dll_config && msm_host->clk_rate) { 2852 ret = sdhci_msm_restore_sdr_dll_config(host); 2853 if (ret) 2854 return ret; 2855 } 2856 2857 dev_pm_opp_set_rate(dev, msm_host->clk_rate); 2858 2859 return sdhci_msm_ice_resume(msm_host); 2860 } 2861 2862 static const struct dev_pm_ops sdhci_msm_pm_ops = { 2863 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 2864 pm_runtime_force_resume) 2865 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, 2866 sdhci_msm_runtime_resume, 2867 NULL) 2868 }; 2869 2870 static struct platform_driver sdhci_msm_driver = { 2871 .probe = sdhci_msm_probe, 2872 .remove = sdhci_msm_remove, 2873 .driver = { 2874 .name = "sdhci_msm", 2875 .of_match_table = sdhci_msm_dt_match, 2876 .pm = &sdhci_msm_pm_ops, 2877 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 2878 }, 2879 }; 2880 2881 module_platform_driver(sdhci_msm_driver); 2882 2883 MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver"); 2884 MODULE_LICENSE("GPL v2"); 2885