// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#include "sdhci-pltfm.h"

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_MCI_GENERICS		0x70
#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)

#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf
#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)

#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define CORE_VENDOR_SPEC_POR_VAL	0xa1c
#define CORE_CLK_PWRSAVE		BIT(1)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		BIT(18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)

#define CORE_3_0V_SUPPORT	(1 << 25)
#define CORE_1_8V_SUPPORT	(1 << 26)
#define CORE_VOLT_SUPPORT	(CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)

#define CORE_PWRSAVE_DLL	BIT(3)

#define DDR_CONFIG_POR_VAL	0x80040873

#define INVALID_TUNING_PHASE	-1
#define SDHCI_MSM_MIN_CLOCK	400000
#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT	20
#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT	24
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS	5000

#define msm_host_readl(msm_host, host, offset) \
	msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
	msm_host->var_ops->msm_writel_relaxed(val, host, offset)

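/*
 * Per-variant register offsets. On older controllers these registers live
 * in the separate MCI (core_mem) region; from V5 onwards they are part of
 * the main SDHCI register space.
 */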
struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};

/*
 * From V5, the register spaces have changed. Wrap this info in a structure
 * and choose the data structure based on the version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};

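/* Driver private data, stored in the sdhci_pltfm private area */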
struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
	unsigned long clk_rate;
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;
	u32 transfer_mode;
	bool updated_ddr_cfg;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
						    unsigned int clock)
{
	struct mmc_ios ios = host->mmc->ios;
	/*
	 * The SDHC requires the internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller uses
	 * the faster clock (100/400 MHz) for some of its parts and sends
	 * the actual required clock (50/200 MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		clock *= 2;
	return clock;
}

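/*
 * Set the GCC core clock for the current bus mode, using the doubled rate
 * returned by msm_get_clock_rate_for_bus_mode() for DDR/HS400 timings.
 */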
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	int rc;

	clock = msm_get_clock_rate_for_bus_mode(host, clock);
	rc = clk_set_rate(core_clk, clock);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), clock,
		       curr_ios.timing);
		return;
	}
	msm_host->clk_rate = clock;
	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), clk_get_rate(core_clk),
		 curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config) & CORE_CK_OUT_EN);
	}

	return 0;
}

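/*
 * Program one of the 16 DLL clock output phases (grey-coded) into the
 * CDR_SELEXT field of DLL_CONFIG, toggling CK_OUT_EN around the update.
 */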
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between the 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If the 2 valid windows form a cycle, merge them into a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases in both windows must be less than
			 * MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
			    ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}

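/* Program the MCLK_FREQ bit field of DLL_CONFIG based on the current host clock */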
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50 us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}

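/*
 * Select the default free running MCLK for the controller and disable the
 * vendor specific HC_SELECT_IN override, so the UHS mode selection in
 * Host Control2 takes effect.
 */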
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other modes.
	 * Write 0 to the HC_SELECT_IN and HC_SELECT_IN_EN fields
	 * in VENDOR_SPEC_FUNC.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

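/*
 * Select the HS400 clock path (free running MCLK/2) and, once tuning is done
 * or enhanced strobe is enabled, force HS400 mode via HC_SELECT_IN.
 */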
static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
				msm_offset->core_dll_status,
				dll_lock,
				(dll_lock &
				(CORE_DLL_LOCK |
				CORE_DDR_DLL_LOCK)), 10,
				1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (Since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *		Initially SDR104 - when tuning is required as HS200
 *		Then when switching to DDR @ 400MHz (HS400) we use
 *		the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}

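/*
 * HS400 calibration for controllers using the CDC LP 533 delay line:
 * run the CDC register initialization sequence, trigger the HW calibration
 * and poll for CALIBRATION_DONE.
 */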
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

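/*
 * HS400 calibration for controllers using the CM DLL (SDC4): program the
 * DDR config, enable DDR calibration and wait for DDR_DLL_LOCK, then
 * enable PWRSAVE_DLL.
 */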
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the core_ddr_config register defaults to the desired
	 * configuration on reset. Reprogram the power-on reset (POR) value
	 * anyway in case a bootloader has modified it. In the future, if
	 * this changes, the desired values will need to be programmed
	 * appropriately.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
	config |= CORE_PWRSAVE_DLL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);

	/*
	 * Drain the write buffer to ensure the above DLL calibration
	 * completes and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

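/*
 * Reset the tuning block, restore the saved tuning phase (unless enhanced
 * strobe is used) and run the variant-specific HS400 calibration.
 */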
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * when the clock frequency is greater than 100MHz. It is skipped
	 * for enhanced strobe.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios->timing == MMC_TIMING_MMC_HS400 ||
	    ios->timing == MMC_TIMING_MMC_HS200 ||
	    ios->timing == MMC_TIMING_UHS_SDR104) ||
	    ios->enhanced_strobe)
		return false;

	return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/*
	 * The SDR DLL comes into the picture only for timing modes which
	 * need tuning.
	 */
	if (!sdhci_msm_is_tuning_needed(host))
		return 0;

	/* Reset the tuning block */
	ret = msm_init_cm_dll(host);
	if (ret)
		return ret;

	/* Restore the tuning block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
	u32 config, oldconfig = readl_relaxed(host->ioaddr +
					      msm_offset->core_dll_config);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig) {
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_dll_config);
	}
}

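/*
 * Tuning entry point: try each of the 16 DLL output phases with the tuning
 * command and program the most appropriate phase from the passing set.
 */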
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 3;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * HS400 tuning in HS200 timing requires:
	 * - selecting MCLK/2 in VENDOR_SPEC
	 * - programming MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

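/*
 * Program the UHS mode select in Host Control2, mapping HS200/HS400 to
 * SDR104. For clocks <= 100MHz, keep the DLL in reset/power-down so that
 * the fixed feedback clock is used and tuning can be skipped.
 */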
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz.
		 * Thus, make sure it is disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status should be called when register writes that
 * can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW happen.
 * The state the register write will change the IO lines to should be passed
 * as the argument req_type. This API checks whether the IO lines are already
 * in the expected state and waits for the power irq only if a power irq is
 * expected to be triggered, based on the current and expected IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
		 __func__, req_type);
}

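/* Dump the PWRCTL status/mask/ctl registers to aid power irq debugging */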
static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

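/*
 * Handle a power irq: clear the status bits (retrying to cover a rare HW
 * race), acknowledge the request via PWRCTL_CTL, update the IO pad power
 * switch if the supported voltages are known, and record the new bus/IO
 * state.
 */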
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and clear/read of the status register
	 * happen at the same time. Hence, retry for at least 10 times to
	 * make sure the status register is cleared. Otherwise, this will
	 * result in a spurious power IRQ, resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
			       mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
				msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back to this register if it succeeded or not. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset IO PAD PWR switch only if the register write
		 * can set IO lines high and the regulator also switches to 3 V.
		 * Else, we should keep the IO PAD PWR switch set.
		 * This is applicable to certain targets where eMMC vccq supply
		 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
		 * IO PAD PWR switch must be kept set to reflect actual
		 * regulator voltage. This way, during initialization of
		 * controllers with only 1.8V, we will set the IO PAD bit
		 * without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
		    (msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
			 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		 irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/**
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use an internal divider and instead
 * directly controls the GCC clock, as per the HW recommendation.
 **/
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	/*
	 * Keep actual_clock as zero:
	 * - since no divider is used, there is no need for actual_clock.
	 * - the MSM controller uses SDCLK for the data timeout calculation;
	 *   if actual_clock is zero, host->clock is taken for the calculation.
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider. Thus read
	 * SDHCI_CLOCK_CONTROL and only enable the clock with no divider
	 * value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		msm_host->clk_rate = clock;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}

/*
 * Platform specific register write functions. This is so that, if any
 * register write needs to be followed up by platform specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions rely on sdhci_set_ios not using a spinlock.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);

	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

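/*
 * Query vqmmc for the supported IO voltage ranges (1.8V/3.0V) and program
 * the IO pad power switch accordingly.
 */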
static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!caps)
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
				mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
		 * bit can be used as required later on.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}

static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);

static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
};

static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};

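/* Probe: set up the clocks and the power irq, then register the host */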
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct resource *core_memres;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Based on the compatible string, load the required msm host info from
	 * the data associated with the version info.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;


	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not specified in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
				core_memres);

		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to its power-on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
			host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set the HC_MODE_EN bit in the HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed(host->ioaddr + SDHCI_HOST_VERSION);
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controllers with major version 1, minor version 0x34 or
	 * later and HS400 mode support use the CM DLL instead of the CDC
	 * LP 533 DLL; only older cores keep the CDC LP 533 path.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;
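
	/*
	 * To summarize the version checks above: minor >= 0x42 enables the
	 * 14lpp DLL reset sequence, minor < 0x34 retains the CDC LP 533
	 * calibration path, and minor >= 0x49 selects the updated
	 * DDR_CONFIG register layout.
	 */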

	/*
	 * The power-on reset state may trigger a power IRQ if the previous
	 * PWRCTL status was either BUS_ON or IO_HIGH_V. So, before enabling
	 * the pwr_irq interrupt in the GIC, acknowledge any pending power
	 * IRQ; otherwise its handler would fire prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before the interrupt
	 * is enabled in the GIC.
	 */
	mb();

	/* Set up the IRQ for handling power/voltage tasks with the PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
			msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;
	sdhci_msm_set_regulator_caps(msm_host);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}

static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}

static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;
	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL
	 * settings need to be restored when it is ungated.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate)
		return sdhci_msm_restore_sdr_dll_config(host);

	return 0;
}
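
/*
 * System sleep is routed through the runtime PM callbacks via
 * pm_runtime_force_suspend()/pm_runtime_force_resume(), so the clock and
 * DLL-restore handling above covers both the runtime and system paths.
 */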
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.of_match_table = sdhci_msm_dt_match,
		.pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");