// SPDX-License-Identifier: GPL-2.0
/*
 * Freescale eSDHC i.MX controller driver for the platform bus.
 *
 * derived from the OF-version.
 *
 * Copyright (c) 2010 Pengutronix e.K.
 *   Author: Wolfram Sang <kernel@pengutronix.de>
 */

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_qos.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#include "cqhci.h"

#define ESDHC_SYS_CTRL_DTOCV_MASK	0x0f
#define ESDHC_CTRL_D3CD			0x08
#define ESDHC_BURST_LEN_EN_INCR		(1 << 27)
/* VENDOR SPEC register */
#define ESDHC_VENDOR_SPEC		0xc0
#define ESDHC_VENDOR_SPEC_SDIO_QUIRK	(1 << 1)
#define ESDHC_VENDOR_SPEC_VSELECT	(1 << 1)
#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON	(1 << 8)
#define ESDHC_DEBUG_SEL_AND_STATUS_REG	0xc2
#define ESDHC_DEBUG_SEL_REG		0xc3
#define ESDHC_DEBUG_SEL_MASK		0xf
#define ESDHC_DEBUG_SEL_CMD_STATE	1
#define ESDHC_DEBUG_SEL_DATA_STATE	2
#define ESDHC_DEBUG_SEL_TRANS_STATE	3
#define ESDHC_DEBUG_SEL_DMA_STATE	4
#define ESDHC_DEBUG_SEL_ADMA_STATE	5
#define ESDHC_DEBUG_SEL_FIFO_STATE	6
#define ESDHC_DEBUG_SEL_ASYNC_FIFO_STATE 7
#define ESDHC_WTMK_LVL			0x44
#define ESDHC_WTMK_DEFAULT_VAL		0x10401040
#define ESDHC_WTMK_LVL_RD_WML_MASK	0x000000FF
#define ESDHC_WTMK_LVL_RD_WML_SHIFT	0
#define ESDHC_WTMK_LVL_WR_WML_MASK	0x00FF0000
#define ESDHC_WTMK_LVL_WR_WML_SHIFT	16
#define ESDHC_WTMK_LVL_WML_VAL_DEF	64
#define ESDHC_WTMK_LVL_WML_VAL_MAX	128
#define ESDHC_MIX_CTRL			0x48
#define ESDHC_MIX_CTRL_DDREN		(1 << 3)
#define ESDHC_MIX_CTRL_AC23EN		(1 << 7)
#define ESDHC_MIX_CTRL_EXE_TUNE		(1 << 22)
#define ESDHC_MIX_CTRL_SMPCLK_SEL	(1 << 23)
#define ESDHC_MIX_CTRL_AUTO_TUNE_EN	(1 << 24)
#define ESDHC_MIX_CTRL_FBCLK_SEL	(1 << 25)
#define ESDHC_MIX_CTRL_HS400_EN		(1 << 26)
#define ESDHC_MIX_CTRL_HS400_ES_EN	(1 << 27)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK	0xb7
/* Tuning bits */
#define ESDHC_MIX_CTRL_TUNING_MASK	0x03c00000

/* dll control register */
#define ESDHC_DLL_CTRL			0x60
#define ESDHC_DLL_OVERRIDE_VAL_SHIFT	9
#define ESDHC_DLL_OVERRIDE_EN_SHIFT	8

/* tune control register */
#define ESDHC_TUNE_CTRL_STATUS		0x68
#define ESDHC_TUNE_CTRL_STEP		1
#define ESDHC_TUNE_CTRL_MIN		0
#define ESDHC_TUNE_CTRL_MAX		((1 << 7) - 1)

/* strobe dll register */
#define ESDHC_STROBE_DLL_CTRL		0x70
#define ESDHC_STROBE_DLL_CTRL_ENABLE	(1 << 0)
#define ESDHC_STROBE_DLL_CTRL_RESET	(1 << 1)
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT	0x7
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT	3
#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT	(4 << 20)

#define ESDHC_STROBE_DLL_STATUS		0x74
#define ESDHC_STROBE_DLL_STS_REF_LOCK	(1 << 1)
#define ESDHC_STROBE_DLL_STS_SLV_LOCK	0x1

#define ESDHC_VEND_SPEC2		0xc8
#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ	(1 << 8)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN	(1 << 4)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN	(0 << 4)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN	(2 << 4)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN	(1 << 6)
#define ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK	(7 << 4)

#define ESDHC_TUNING_CTRL		0xcc
#define ESDHC_STD_TUNING_EN		(1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT	0x1
#define ESDHC_TUNING_START_TAP_MASK	0x7f
#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE	(1 << 7)
#define ESDHC_TUNING_STEP_MASK		0x00070000
#define ESDHC_TUNING_STEP_SHIFT		16

/* pinctrl state */
#define ESDHC_PINCTRL_STATE_100MHZ	"state_100mhz"
#define ESDHC_PINCTRL_STATE_200MHZ	"state_200mhz"

/*
 * Our interpretation of the SDHCI_HOST_CONTROL register
 */
#define ESDHC_CTRL_4BITBUS		(0x1 << 1)
#define ESDHC_CTRL_8BITBUS		(0x2 << 1)
#define ESDHC_CTRL_BUSWIDTH_MASK	(0x3 << 1)
#define USDHC_GET_BUSWIDTH(c) (c & ESDHC_CTRL_BUSWIDTH_MASK)

/*
 * There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC:
 * Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design,
 * but bit28 is used as the INT DMA ERR in fsl eSDHC design.
 * Define this macro for the DMA error INT of fsl eSDHC.
 */
#define ESDHC_INT_VENDOR_SPEC_DMA_ERR	(1 << 28)

/* the address offset of CQHCI */
#define ESDHC_CQHCI_ADDR_OFFSET		0x100

/*
 * The CMDTYPE of the CMD register (offset 0xE) should be set to
 * "11" when the STOP CMD12 is issued on imx53 to abort one
 * open ended multi-blk IO. Otherwise the TC INT wouldn't
 * be generated.
 * In exact block transfer, the controller doesn't complete the
 * operations automatically as required at the end of the
 * transfer and remains on hold if the abort command is not sent.
 * As a result, the TC flag is not asserted and SW receives a
 * timeout exception. Bit1 of the Vendor Spec register is used to fix it.
 */
#define ESDHC_FLAG_MULTIBLK_NO_INT	BIT(1)
/*
 * The flag tells that the ESDHC controller is a uSDHC block that is
 * integrated on the i.MX6 series.
 */
#define ESDHC_FLAG_USDHC		BIT(3)
/* The IP supports manual tuning process */
#define ESDHC_FLAG_MAN_TUNING		BIT(4)
/* The IP supports standard tuning process */
#define ESDHC_FLAG_STD_TUNING		BIT(5)
/* The IP has SDHCI_CAPABILITIES_1 register */
#define ESDHC_FLAG_HAVE_CAP1		BIT(6)
/*
 * The IP has erratum ERR004536:
 * uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow
 * when reading data from the card.
 * This flag is also set for i.MX25 and i.MX35 in order to get
 * SDHCI_QUIRK_BROKEN_ADMA, but for different reasons (ADMA capability bits).
 */
#define ESDHC_FLAG_ERR004536		BIT(7)
/* The IP supports HS200 mode */
#define ESDHC_FLAG_HS200		BIT(8)
/* The IP supports HS400 mode */
#define ESDHC_FLAG_HS400		BIT(9)
/*
 * The IP has erratum ERR010450:
 * uSDHC: Due to the I/O timing limit, for SDR mode the SD card clock can't
 * exceed 150MHz, and for DDR mode the SD card clock can't exceed 45MHz.
 */
#define ESDHC_FLAG_ERR010450		BIT(10)
/* The IP supports HS400ES mode */
#define ESDHC_FLAG_HS400_ES		BIT(11)
/* The IP has Host Controller Interface for Command Queuing */
#define ESDHC_FLAG_CQHCI		BIT(12)
/* need to request pm_qos during low power */
#define ESDHC_FLAG_PMQOS		BIT(13)
/* The IP state got lost in low power mode */
#define ESDHC_FLAG_STATE_LOST_IN_LPMODE		BIT(14)
/* The IP lost clock rate in PM_RUNTIME */
#define ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME	BIT(15)
/*
 * The IP does not support the ACMD23 feature completely when using ADMA mode.
 * In ADMA mode it only uses the 16-bit block count of the BLOCK_ATT register
 * (offset 0x4) as the CMD23 argument, which means the upper 16 bits of the
 * CMD23 argument are ignored. This breaks reliable write in RPMB, because an
 * RPMB reliable write needs bit 31 of the CMD23 argument to be set.
 * imx6qpdl/imx6sx/imx6sl/imx7d have this limitation only in ADMA mode; SDMA
 * is not affected. So when these SoCs use ADMA mode, the ACMD23 feature needs
 * to be disabled.
 */
#define ESDHC_FLAG_BROKEN_AUTO_CMD23	BIT(16)

/* ERR004536 is not applicable for the IP */
#define ESDHC_FLAG_SKIP_ERR004536	BIT(17)

enum wp_types {
	ESDHC_WP_NONE,		/* no WP, neither controller nor gpio */
	ESDHC_WP_CONTROLLER,	/* mmc controller internal WP */
	ESDHC_WP_GPIO,		/* external gpio pin for WP */
};

enum cd_types {
	ESDHC_CD_NONE,		/* no CD, neither controller nor gpio */
	ESDHC_CD_CONTROLLER,	/* mmc controller internal CD */
	ESDHC_CD_GPIO,		/* external gpio pin for CD */
	ESDHC_CD_PERMANENT,	/* no CD, card permanently wired to host */
};

/*
 * struct esdhc_platform_data - platform data for esdhc on i.MX
 *
 * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
 *
 * @wp_type:	type of write_protect method (see wp_types enum above)
 * @cd_type:	type of card_detect method (see cd_types enum above)
 */

struct esdhc_platform_data {
	enum wp_types wp_type;
	enum cd_types cd_type;
	int max_bus_width;
	unsigned int delay_line;
	unsigned int tuning_step;	/* The delay cell steps in tuning procedure */
	unsigned int tuning_start_tap;	/* The start delay cell point in tuning procedure */
	unsigned int strobe_dll_delay_target;	/* The delay cell for strobe pad (read clock) */
};

struct esdhc_soc_data {
	u32 flags;
};

static const struct esdhc_soc_data esdhc_imx25_data = {
	.flags = ESDHC_FLAG_ERR004536,
};

static const struct esdhc_soc_data esdhc_imx35_data = {
	.flags = ESDHC_FLAG_ERR004536,
};

static const struct esdhc_soc_data esdhc_imx51_data = {
	.flags = 0,
};

static const struct esdhc_soc_data esdhc_imx53_data = {
	.flags = ESDHC_FLAG_MULTIBLK_NO_INT,
};

static const struct esdhc_soc_data usdhc_imx6q_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static const struct esdhc_soc_data usdhc_imx6sl_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
			| ESDHC_FLAG_HS200
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static const struct esdhc_soc_data usdhc_imx6sll_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

static const struct esdhc_soc_data usdhc_imx6sx_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static const struct esdhc_soc_data usdhc_imx6ull_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_ERR010450
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

static const struct esdhc_soc_data usdhc_imx7d_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static struct esdhc_soc_data usdhc_s32g2_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
			| ESDHC_FLAG_SKIP_ERR004536,
};

static struct esdhc_soc_data usdhc_imx7ulp_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

static struct esdhc_soc_data usdhc_imx8qxp_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
			| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
};

static struct esdhc_soc_data usdhc_imx8mm_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

struct pltfm_imx_data {
	u32 scratchpad;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_100mhz;
	struct pinctrl_state *pins_200mhz;
	const struct esdhc_soc_data *socdata;
	struct esdhc_platform_data boarddata;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	struct clk *clk_per;
	unsigned int actual_clock;
	enum {
		NO_CMD_PENDING,		/* no multiblock command pending */
		MULTIBLK_IN_PROCESS,	/* exact multiblock cmd in process */
		WAIT_FOR_INT,		/* sent CMD12, waiting for response INT */
	} multiblock_status;
	u32 is_ddr;
	struct pm_qos_request pm_qos_req;
};

static const struct of_device_id imx_esdhc_dt_ids[] = {
	{ .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, },
	{ .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
	{ .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, },
	{ .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
	{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
	{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
	{ .compatible = "fsl,imx6sll-usdhc", .data = &usdhc_imx6sll_data, },
	{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
	{ .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
	{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
	{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
	{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
	{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
	{ .compatible = "nxp,s32g2-usdhc", .data = &usdhc_s32g2_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);

static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
{
	return data->socdata == &esdhc_imx25_data;
}

static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
{
	return data->socdata == &esdhc_imx53_data;
}

static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
{
	return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
}
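/*
 * Helper for sub-word register updates: the eSDHC register file is accessed
 * as little-endian 32-bit words, so 8/16-bit SDHCI register writes are folded
 * into a read-modify-write of the containing 32-bit word, with the field
 * placed at the byte offset encoded in the low two bits of @reg.
 */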
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
{
	void __iomem *base = host->ioaddr + (reg & ~0x3);
	u32 shift = (reg & 0x3) * 8;

	writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
}

#define DRIVER_NAME "sdhci-esdhc-imx"
#define ESDHC_IMX_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
static void esdhc_dump_debug_regs(struct sdhci_host *host)
{
	int i;
	char *debug_status[7] = {
				 "cmd debug status",
				 "data debug status",
				 "trans debug status",
				 "dma debug status",
				 "adma debug status",
				 "fifo debug status",
				 "async fifo debug status"
	};

	ESDHC_IMX_DUMP("========= ESDHC IMX DEBUG STATUS DUMP =========\n");
	for (i = 0; i < 7; i++) {
		esdhc_clrset_le(host, ESDHC_DEBUG_SEL_MASK,
			ESDHC_DEBUG_SEL_CMD_STATE + i, ESDHC_DEBUG_SEL_REG);
		ESDHC_IMX_DUMP("%s:  0x%04x\n", debug_status[i],
			readw(host->ioaddr + ESDHC_DEBUG_SEL_AND_STATUS_REG));
	}

	esdhc_clrset_le(host, ESDHC_DEBUG_SEL_MASK, 0, ESDHC_DEBUG_SEL_REG);
}

static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
{
	u32 present_state;
	int ret;

	ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, present_state,
				(present_state & ESDHC_CLOCK_GATE_OFF), 2, 100);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc), "%s: card clock still not gated off in 100us!\n", __func__);
}

/* Enable the auto tuning circuit to check the CMD line and BUS line */
static inline void usdhc_auto_tuning_mode_sel(struct sdhci_host *host)
{
	u32 buswidth, auto_tune_buswidth;

	buswidth = USDHC_GET_BUSWIDTH(readl(host->ioaddr + SDHCI_HOST_CONTROL));

	switch (buswidth) {
	case ESDHC_CTRL_8BITBUS:
		auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN;
		break;
	case ESDHC_CTRL_4BITBUS:
		auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN;
		break;
	default:	/* 1BITBUS */
		auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN;
		break;
	}

	esdhc_clrset_le(host, ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK,
			auto_tune_buswidth | ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN,
			ESDHC_VEND_SPEC2);
}

static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 val = readl(host->ioaddr + reg);

	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
		u32 fsl_prss = val;
		/* save the lowest 20 bits */
		val = fsl_prss & 0x000FFFFF;
		/* move dat[0-3] bits */
		val |= (fsl_prss & 0x0F000000) >> 4;
		/* move cmd line bit */
		val |= (fsl_prss & 0x00800000) << 1;
	}

	if (unlikely(reg == SDHCI_CAPABILITIES)) {
		/* ignore bit[0-15] as it stores cap_1 register val for mx6sl */
		if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
			val &= 0xffff0000;

		/* In FSL esdhc IC module, only bit20 is used to indicate the
		 * ADMA2 capability of esdhc, but this bit is messed up on
		 * some SOCs (e.g. on MX25, MX35 this bit is set, but they
		 * don't actually support ADMA2). So set the BROKEN_ADMA
		 * quirk on MX25/35 platforms.
		 */
		if (val & SDHCI_CAN_DO_ADMA1) {
			val &= ~SDHCI_CAN_DO_ADMA1;
			val |= SDHCI_CAN_DO_ADMA2;
		}
	}

	if (unlikely(reg == SDHCI_CAPABILITIES_1)) {
		if (esdhc_is_usdhc(imx_data)) {
			if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
				val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF;
			else
				/* imx6q/dl does not have cap_1 register, fake one */
				val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
					| SDHCI_SUPPORT_SDR50
					| SDHCI_USE_SDR50_TUNING
					| FIELD_PREP(SDHCI_RETUNING_MODE_MASK,
						     SDHCI_TUNING_MODE_3);

			/*
			 * Do not advertise faster UHS modes if there are no
			 * pinctrl states for 100MHz/200MHz.
			 */
			if (IS_ERR_OR_NULL(imx_data->pins_100mhz))
				val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
			if (IS_ERR_OR_NULL(imx_data->pins_200mhz))
				val &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
		}
	}

	if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) {
		val = 0;
		val |= FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, 0xFF);
		val |= FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, 0xFF);
		val |= FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, 0xFF);
	}

	if (unlikely(reg == SDHCI_INT_STATUS)) {
		if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
			val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
			val |= SDHCI_INT_ADMA_ERROR;
		}

		/*
		 * mask off the interrupt we get in response to the manually
		 * sent CMD12
		 */
		if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
		    ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
			val &= ~SDHCI_INT_RESPONSE;
			writel(SDHCI_INT_RESPONSE, host->ioaddr +
						   SDHCI_INT_STATUS);
			imx_data->multiblock_status = NO_CMD_PENDING;
		}
	}

	return val;
}

static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 data;

	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
			reg == SDHCI_INT_STATUS)) {
		if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
			/*
			 * Clear and then set the D3CD bit to avoid missing the
			 * card interrupt. This is an eSDHC controller problem,
			 * so we need the following workaround: clearing and
			 * setting the D3CD bit makes the eSDHC re-sample the
			 * card interrupt, so a card interrupt that was lost
			 * gets picked up again.
			 */
			data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
			data &= ~ESDHC_CTRL_D3CD;
			writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
			data |= ESDHC_CTRL_D3CD;
			writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
		}

		if (val & SDHCI_INT_ADMA_ERROR) {
			val &= ~SDHCI_INT_ADMA_ERROR;
			val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
		}
	}

	if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
				&& (reg == SDHCI_INT_STATUS)
				&& (val & SDHCI_INT_DATA_END))) {
		u32 v;

		v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
		writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);

		if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) {
			/* send a manual CMD12 with RESPTYP=none */
			data = MMC_STOP_TRANSMISSION << 24 |
			       SDHCI_CMD_ABORTCMD << 16;
			writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
			imx_data->multiblock_status = WAIT_FOR_INT;
		}
	}

	writel(val, host->ioaddr + reg);
}

static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u16 ret = 0;
	u32 val;

	if (unlikely(reg == SDHCI_HOST_VERSION)) {
		reg ^= 2;
		if (esdhc_is_usdhc(imx_data)) {
			/*
			 * The usdhc register returns a wrong host version.
			 * Correct it here.
			 */
			return SDHCI_SPEC_300;
		}
	}

	if (unlikely(reg == SDHCI_HOST_CONTROL2)) {
		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		if (val & ESDHC_VENDOR_SPEC_VSELECT)
			ret |= SDHCI_CTRL_VDD_180;

		if (esdhc_is_usdhc(imx_data)) {
			if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
				val = readl(host->ioaddr + ESDHC_MIX_CTRL);
			else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
				/* the std tuning bits are in ACMD12_ERR for imx6sl */
				val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
		}

		if (val & ESDHC_MIX_CTRL_EXE_TUNE)
			ret |= SDHCI_CTRL_EXEC_TUNING;
		if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
			ret |= SDHCI_CTRL_TUNED_CLK;

		ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		return ret;
	}

	if (unlikely(reg == SDHCI_TRANSFER_MODE)) {
		if (esdhc_is_usdhc(imx_data)) {
			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
			ret = m & ESDHC_MIX_CTRL_SDHCI_MASK;
			/* Swap AC23 bit */
			if (m & ESDHC_MIX_CTRL_AC23EN) {
				ret &= ~ESDHC_MIX_CTRL_AC23EN;
				ret |= SDHCI_TRNS_AUTO_CMD23;
			}
		} else {
			ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE);
		}

		return ret;
	}

	return readw(host->ioaddr + reg);
}

static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 new_val = 0;

	switch (reg) {
	case SDHCI_CLOCK_CONTROL:
		new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		if (val & SDHCI_CLOCK_CARD_EN)
			new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
		else
			new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
		writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
		if (!(new_val & ESDHC_VENDOR_SPEC_FRC_SDCLK_ON))
			esdhc_wait_for_card_clock_gate_off(host);
		return;
	case SDHCI_HOST_CONTROL2:
		new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		if (val & SDHCI_CTRL_VDD_180)
			new_val |= ESDHC_VENDOR_SPEC_VSELECT;
		else
			new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
		writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
		if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
			u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
			if (val & SDHCI_CTRL_TUNED_CLK) {
				v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
			} else {
				v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
				m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
				m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
			}

			if (val & SDHCI_CTRL_EXEC_TUNING) {
				v |= ESDHC_MIX_CTRL_EXE_TUNE;
				m |= ESDHC_MIX_CTRL_FBCLK_SEL;
				m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
				usdhc_auto_tuning_mode_sel(host);
			} else {
				v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
			}

			writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		}
		return;
	case SDHCI_TRANSFER_MODE:
		if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
				&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
				&& (host->cmd->data->blocks > 1)
				&& (host->cmd->data->flags & MMC_DATA_READ)) {
			u32 v;
			v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
			v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK;
			writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
		}

		if (esdhc_is_usdhc(imx_data)) {
			u32 wml;
			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
			/* Swap AC23 bit */
			if (val & SDHCI_TRNS_AUTO_CMD23) {
				val &= ~SDHCI_TRNS_AUTO_CMD23;
				val |= ESDHC_MIX_CTRL_AC23EN;
			}
			m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK);
			writel(m, host->ioaddr + ESDHC_MIX_CTRL);

			/* Set watermark levels for PIO access to maximum value
			 * (128 words) to accommodate full 512 bytes buffer.
			 * For DMA access restore the levels to default value.
			 */
			m = readl(host->ioaddr + ESDHC_WTMK_LVL);
			if (val & SDHCI_TRNS_DMA) {
				wml = ESDHC_WTMK_LVL_WML_VAL_DEF;
			} else {
				u8 ctrl;
				wml = ESDHC_WTMK_LVL_WML_VAL_MAX;

				/*
				 * Since DMA mode is already disabled, the
				 * DMASEL bits also need to be cleared.
				 * Otherwise, for standard tuning, the usdhc
				 * would still prefetch the ADMA script from a
				 * stale DMA address when the tuning command is
				 * sent, and the IOMMU would then report errors
				 * about a missing TLB mapping.
				 */
				ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
				ctrl &= ~SDHCI_CTRL_DMA_MASK;
				sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			}
			m &= ~(ESDHC_WTMK_LVL_RD_WML_MASK |
			       ESDHC_WTMK_LVL_WR_WML_MASK);
			m |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) |
			     (wml << ESDHC_WTMK_LVL_WR_WML_SHIFT);
			writel(m, host->ioaddr + ESDHC_WTMK_LVL);
		} else {
			/*
			 * Postpone this write, we must do it together with a
			 * command write that is down below.
			 */
			imx_data->scratchpad = val;
		}
		return;
	case SDHCI_COMMAND:
		if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
			val |= SDHCI_CMD_ABORTCMD;

		if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
		    (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
			imx_data->multiblock_status = MULTIBLK_IN_PROCESS;

		if (esdhc_is_usdhc(imx_data))
			writel(val << 16,
			       host->ioaddr + SDHCI_TRANSFER_MODE);
		else
			writel(val << 16 | imx_data->scratchpad,
			       host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	case SDHCI_BLOCK_SIZE:
		val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
		break;
	}
	esdhc_clrset_le(host, 0xffff, val, reg);
}

static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 val;

	switch (reg) {
	case SDHCI_HOST_CONTROL:
		val = readl(host->ioaddr + reg);

		ret = val & SDHCI_CTRL_LED;
		ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
		ret |= (val & ESDHC_CTRL_4BITBUS);
		ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
		return ret;
	}

	return readb(host->ioaddr + reg);
}

static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 new_val = 0;
	u32 mask;

	switch (reg) {
	case SDHCI_POWER_CONTROL:
		/*
		 * FSL put some DMA bits here.
		 * If your board has a regulator, code should be here.
		 */
		return;
	case SDHCI_HOST_CONTROL:
		/* FSL messed up here, so we need to manually compose it. */
		new_val = val & SDHCI_CTRL_LED;
		/* ensure the endianness */
		new_val |= ESDHC_HOST_CONTROL_LE;
		/* bits 8&9 are reserved on mx25 */
		if (!is_imx25_esdhc(imx_data)) {
			/* DMA mode bits are shifted */
			new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
		}

		/*
		 * Do not touch buswidth bits here. This is done in
		 * esdhc_pltfm_bus_width.
		 * Do not touch the D3CD bit either, which is used for the
		 * SDIO interrupt erratum workaround.
		 */
		mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);

		esdhc_clrset_le(host, mask, new_val, reg);
		return;
	case SDHCI_SOFTWARE_RESET:
		if (val & SDHCI_RESET_DATA)
			new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL);
		break;
	}
	esdhc_clrset_le(host, 0xff, val, reg);

	if (reg == SDHCI_SOFTWARE_RESET) {
		if (val & SDHCI_RESET_ALL) {
			/*
			 * The esdhc violates the SDHC spec, which says that a
			 * software reset should not affect the card detection
			 * circuit. The esdhc clears its SYSCTL register bits
			 * [0..2] during the software reset, which stops the
			 * clocks the card detection circuit relies on. To work
			 * around it, turn those clocks back on to keep the
			 * card detection circuit functional.
			 */
			esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
			/*
			 * The reset on usdhc fails to clear MIX_CTRL register.
			 * Do it manually here.
			 */
			if (esdhc_is_usdhc(imx_data)) {
				/*
				 * the tuning bits should be kept during reset
				 */
				new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
				writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
				       host->ioaddr + ESDHC_MIX_CTRL);
				imx_data->is_ddr = 0;
			}
		} else if (val & SDHCI_RESET_DATA) {
			/*
			 * The eSDHC DAT line software reset clears at least the
			 * data transfer width on i.MX25, so make sure that the
			 * Host Control register is unaffected.
			 */
			esdhc_clrset_le(host, 0xff, new_val,
					SDHCI_HOST_CONTROL);
		}
	}
}

static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return pltfm_host->clock;
}

static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return pltfm_host->clock / 256 / 16;
}

static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
					 unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	unsigned int host_clock = pltfm_host->clock;
	int ddr_pre_div = imx_data->is_ddr ? 2 : 1;
	int pre_div = 1;
	int div = 1;
	int ret;
	u32 temp, val;

	if (esdhc_is_usdhc(imx_data)) {
		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
			host->ioaddr + ESDHC_VENDOR_SPEC);
		esdhc_wait_for_card_clock_gate_off(host);
	}

	if (clock == 0) {
		host->mmc->actual_clock = 0;
		return;
	}

	/* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
	if (is_imx53_esdhc(imx_data)) {
		/*
		 * According to the i.MX53 reference manual, if DLLCTRL[10] can
		 * be set, then the controller is eSDHCv3, else it is eSDHCv2.
		 */
		val = readl(host->ioaddr + ESDHC_DLL_CTRL);
		writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
		temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
		writel(val, host->ioaddr + ESDHC_DLL_CTRL);
		if (temp & BIT(10))
			pre_div = 2;
	}

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		| ESDHC_CLOCK_MASK);
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
		unsigned int max_clock;

		max_clock = imx_data->is_ddr ? 45000000 : 150000000;

		clock = min(clock, max_clock);
	}
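	/*
	 * Pick the clock divider: the card clock is host_clock /
	 * (pre_div * div), with an extra factor of 2 already folded into
	 * ddr_pre_div for DDR timings. pre_div is a power of two up to 256
	 * and div ranges from 1 to 16; the register fields written below
	 * store pre_div >> 1 and div - 1 respectively.
	 */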
	while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
			pre_div < 256)
		pre_div *= 2;

	while (host_clock / (div * pre_div * ddr_pre_div) > clock && div < 16)
		div++;

	host->mmc->actual_clock = host_clock / (div * pre_div * ddr_pre_div);
	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

	pre_div >>= 1;
	div--;

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		| (div << ESDHC_DIVIDER_SHIFT)
		| (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/* wait for bit 3 of PRSSTAT to be set, to make sure the card clock is stable */
	ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, temp,
				 (temp & ESDHC_CLOCK_STABLE), 2, 100);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc), "card clock still not stable in 100us!\n");

	if (esdhc_is_usdhc(imx_data)) {
		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
			host->ioaddr + ESDHC_VENDOR_SPEC);
	}
}

static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct esdhc_platform_data *boarddata = &imx_data->boarddata;

	switch (boarddata->wp_type) {
	case ESDHC_WP_GPIO:
		return mmc_gpio_get_ro(host->mmc);
	case ESDHC_WP_CONTROLLER:
		return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
			 SDHCI_WRITE_PROTECT);
	case ESDHC_WP_NONE:
		break;
	}

	return -ENOSYS;
}

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl;

	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl = ESDHC_CTRL_8BITBUS;
		break;
	case MMC_BUS_WIDTH_4:
		ctrl = ESDHC_CTRL_4BITBUS;
		break;
	default:
		ctrl = 0;
		break;
	}

	esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
			SDHCI_HOST_CONTROL);
}

static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);

	/*
	 * The i.MX uSDHC internally already uses a fixed optimized timing for
	 * DDR50, so it normally does not require tuning for DDR50 mode.
	 */
	if (host->timing == MMC_TIMING_UHS_DDR50)
		return 0;

	return sdhci_execute_tuning(mmc, opcode);
}

static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
	u32 reg;
	u8 sw_rst;
	int ret;

	/* FIXME: delay a bit for card to be ready for next tuning due to errors */
	mdelay(1);

	/* The IC designers suggest resetting the USDHC before every tuning command */
	esdhc_clrset_le(host, 0xff, SDHCI_RESET_ALL, SDHCI_SOFTWARE_RESET);
	ret = readb_poll_timeout(host->ioaddr + SDHCI_SOFTWARE_RESET, sw_rst,
				!(sw_rst & SDHCI_RESET_ALL), 10, 100);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc),
			 "warning! RESET_ALL never completed before sending tuning command\n");

	reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
	reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
			ESDHC_MIX_CTRL_FBCLK_SEL;
	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
	writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
	dev_dbg(mmc_dev(host->mmc),
		"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
			val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
}

static void esdhc_post_tuning(struct sdhci_host *host)
{
	u32 reg;

	usdhc_auto_tuning_mode_sel(host);

	reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
	reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
	reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
}
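/*
 * Manual tuning: sweep the delay cells upwards to find the first value that
 * passes tuning, keep stepping until tuning starts failing again, and then
 * program the midpoint of that passing window as the final sample point.
 */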
static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
{
	int min, max, avg, ret;

	/* find the minimum delay first which can pass tuning */
	min = ESDHC_TUNE_CTRL_MIN;
	while (min < ESDHC_TUNE_CTRL_MAX) {
		esdhc_prepare_tuning(host, min);
		if (!mmc_send_tuning(host->mmc, opcode, NULL))
			break;
		min += ESDHC_TUNE_CTRL_STEP;
	}

	/* find the maximum delay which can not pass tuning */
	max = min + ESDHC_TUNE_CTRL_STEP;
	while (max < ESDHC_TUNE_CTRL_MAX) {
		esdhc_prepare_tuning(host, max);
		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			max -= ESDHC_TUNE_CTRL_STEP;
			break;
		}
		max += ESDHC_TUNE_CTRL_STEP;
	}

	/* use average delay to get the best timing */
	avg = (min + max) / 2;
	esdhc_prepare_tuning(host, avg);
	ret = mmc_send_tuning(host->mmc, opcode, NULL);
	esdhc_post_tuning(host);

	dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
		ret ? "failed" : "passed", avg, ret);

	return ret;
}

static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 m;

	m = readl(host->ioaddr + ESDHC_MIX_CTRL);
	if (ios->enhanced_strobe)
		m |= ESDHC_MIX_CTRL_HS400_ES_EN;
	else
		m &= ~ESDHC_MIX_CTRL_HS400_ES_EN;
	writel(m, host->ioaddr + ESDHC_MIX_CTRL);
}

static int esdhc_change_pinstate(struct sdhci_host *host,
						unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct pinctrl_state *pinctrl;

	dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);

	if (IS_ERR(imx_data->pinctrl) ||
		IS_ERR(imx_data->pins_100mhz) ||
		IS_ERR(imx_data->pins_200mhz))
		return -EINVAL;

	switch (uhs) {
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_DDR50:
		pinctrl = imx_data->pins_100mhz;
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_MMC_HS400:
		pinctrl = imx_data->pins_200mhz;
		break;
	default:
		/* back to default state for other legacy timing */
		return pinctrl_select_default_state(mmc_dev(host->mmc));
	}

	return pinctrl_select_state(imx_data->pinctrl, pinctrl);
}
/*
 * For HS400 eMMC, there is a data_strobe line. This signal is generated
 * by the device and used for data output and CRC status response output
 * in HS400 mode. The frequency of this signal follows the frequency of
 * CLK generated by the host. The host receives the data aligned to the
 * edge of the data_strobe line. Due to the time delay between the CLK
 * line and the data_strobe line, if the delay is larger than one clock
 * cycle, CLK and data_strobe become misaligned and read errors show up.
 */
static void esdhc_set_strobe_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 strobe_delay;
	u32 v;
	int ret;

	/* disable clock before enabling strobe dll */
	writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
		~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
		host->ioaddr + ESDHC_VENDOR_SPEC);
	esdhc_wait_for_card_clock_gate_off(host);

	/* force a reset on strobe dll */
	writel(ESDHC_STROBE_DLL_CTRL_RESET,
		host->ioaddr + ESDHC_STROBE_DLL_CTRL);
	/* clear the reset bit on strobe dll before any setting */
	writel(0, host->ioaddr + ESDHC_STROBE_DLL_CTRL);

	/*
	 * enable strobe dll ctrl and adjust the delay target
	 * for the uSDHC loopback read clock
	 */
	if (imx_data->boarddata.strobe_dll_delay_target)
		strobe_delay = imx_data->boarddata.strobe_dll_delay_target;
	else
		strobe_delay = ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT;
	v = ESDHC_STROBE_DLL_CTRL_ENABLE |
		ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT |
		(strobe_delay << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
	writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);

	/* wait max 50us to get the REF/SLV lock */
	ret = readl_poll_timeout(host->ioaddr + ESDHC_STROBE_DLL_STATUS, v,
		((v & ESDHC_STROBE_DLL_STS_REF_LOCK) && (v & ESDHC_STROBE_DLL_STS_SLV_LOCK)), 1, 50);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc),
			 "warning! HS400 strobe DLL status REF/SLV not locked in 50us, STROBE DLL status is %x!\n", v);
}

static void esdhc_reset_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 ctrl;
	int ret;

	/* Reset the tuning circuit */
	if (esdhc_is_usdhc(imx_data)) {
		if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
			ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
			ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
			ctrl &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
			writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
			writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
			ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
			ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
			writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			/* Make sure ESDHC_MIX_CTRL_EXE_TUNE cleared */
			ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
				ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
			if (ret == -ETIMEDOUT)
				dev_warn(mmc_dev(host->mmc),
					 "Warning! clear execute tuning bit failed\n");
			/*
			 * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears
			 * the usdhc IP internal logic flag
			 * execute_tuning_with_clr_buf, which finally makes
			 * sure the normal data transfer logic is correct.
			 */
			ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
			ctrl |= SDHCI_INT_DATA_AVAIL;
			writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
		}
	}
}

static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u32 m;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct esdhc_platform_data *boarddata = &imx_data->boarddata;

	/* disable ddr mode and disable HS400 mode */
	m = readl(host->ioaddr + ESDHC_MIX_CTRL);
	m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN);
	imx_data->is_ddr = 0;

	switch (timing) {
	case MMC_TIMING_UHS_SDR12:
	case MMC_TIMING_UHS_SDR25:
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_MMC_HS200:
		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		m |= ESDHC_MIX_CTRL_DDREN;
		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		imx_data->is_ddr = 1;
		if (boarddata->delay_line) {
			u32 v;
			v = boarddata->delay_line <<
				ESDHC_DLL_OVERRIDE_VAL_SHIFT |
				(1 << ESDHC_DLL_OVERRIDE_EN_SHIFT);
			if (is_imx53_esdhc(imx_data))
				v <<= 1;
			writel(v, host->ioaddr + ESDHC_DLL_CTRL);
		}
		break;
	case MMC_TIMING_MMC_HS400:
		m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		imx_data->is_ddr = 1;
		/* update clock after enable DDR for strobe DLL lock */
		host->ops->set_clock(host, host->clock);
		esdhc_set_strobe_dll(host);
		break;
	case MMC_TIMING_LEGACY:
	default:
		esdhc_reset_tuning(host);
		break;
	}

	esdhc_change_pinstate(host, timing);
}

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	sdhci_reset(host, mask);

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);

	/* Doc Erratum: the uSDHC actual maximum timeout count is 1 << 29 */
	return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
}

static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);

	/* use maximum timeout counter */
	esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
			esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
			SDHCI_TIMEOUT_CONTROL);
}
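/*
 * IRQ hook shared with CQHCI: interrupts that belong to an active command
 * queue are handed to cqhci_irq(), everything else is passed back to the
 * standard SDHCI interrupt handler via the returned mask.
 */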
static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

static struct sdhci_ops sdhci_esdhc_ops = {
	.read_l = esdhc_readl_le,
	.read_w = esdhc_readw_le,
	.read_b = esdhc_readb_le,
	.write_l = esdhc_writel_le,
	.write_w = esdhc_writew_le,
	.write_b = esdhc_writeb_le,
	.set_clock = esdhc_pltfm_set_clock,
	.get_max_clock = esdhc_pltfm_get_max_clock,
	.get_min_clock = esdhc_pltfm_get_min_clock,
	.get_max_timeout_count = esdhc_get_max_timeout_count,
	.get_ro = esdhc_pltfm_get_ro,
	.set_timeout = esdhc_set_timeout,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.reset = esdhc_reset,
	.irq = esdhc_cqhci_irq,
	.dump_vendor_regs = esdhc_dump_debug_regs,
};

static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT
			| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
			| SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
			| SDHCI_QUIRK_BROKEN_CARD_DETECTION,
	.ops = &sdhci_esdhc_ops,
};

static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host = host->mmc->cqe_private;
	int tmp;

	if (esdhc_is_usdhc(imx_data)) {
		/*
		 * The imx6q ROM code will change the default watermark
		 * level setting to something insane. Change it back here.
		 */
		writel(ESDHC_WTMK_DEFAULT_VAL, host->ioaddr + ESDHC_WTMK_LVL);

		/*
		 * ROM code will change the bit burst_length_enable setting
		 * to zero if this usdhc is chosen to boot the system. Change
		 * it back here, otherwise it will hurt performance a lot.
		 * This bit is used to enable/disable the burst length for
		 * the external AHB2AXI bridge. It's useful especially for
		 * INCR transfers because without a burst length indicator,
		 * the AHB2AXI bridge does not know the burst length in
		 * advance, and AHB INCR transfers can only be converted to
		 * singles on the AXI side.
		 */
		writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
			| ESDHC_BURST_LEN_EN_INCR,
			host->ioaddr + SDHCI_HOST_CONTROL);

		/*
		 * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
		 * TO1.1, it's harmless for MX6SL
		 */
		if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_ERR004536)) {
			writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
				host->ioaddr + 0x6c);
		}

		/* disable DLL_CTRL delay line settings */
		writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);

		/*
		 * For commands with busy signalling, setting the bit
		 * ESDHC_VEND_SPEC2_EN_BUSY_IRQ makes the USDHC generate a
		 * transfer complete interrupt when busy is deasserted.
		 * When CQHCI uses DCMD to send a command that needs an R1b
		 * response, CQHCI requires ESDHC_VEND_SPEC2_EN_BUSY_IRQ to
		 * be set, otherwise the DCMD always times out waiting for
		 * the hardware interrupt.
		 */
		if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
			tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2);
			tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ;
			writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2);

			host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
		}

		if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
			tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
			tmp |= ESDHC_STD_TUNING_EN |
				ESDHC_TUNING_START_TAP_DEFAULT;
			if (imx_data->boarddata.tuning_start_tap) {
				tmp &= ~ESDHC_TUNING_START_TAP_MASK;
				tmp |= imx_data->boarddata.tuning_start_tap;
			}

			if (imx_data->boarddata.tuning_step) {
				tmp &= ~ESDHC_TUNING_STEP_MASK;
				tmp |= imx_data->boarddata.tuning_step
					<< ESDHC_TUNING_STEP_SHIFT;
			}

			/*
			 * Disable the CMD CRC check for tuning. If it stays
			 * enabled, a delay is needed after every tuning
			 * command, because the hardware standard tuning logic
			 * jumps to the next step as soon as it detects a CMD
			 * CRC error: it does not wait for the card to finish
			 * sending the tuning data, but raises the buffer read
			 * ready interrupt immediately. If the usdhc then sends
			 * the next tuning command, some eMMC cards get stuck
			 * and stop responding, which blocks the tuning
			 * procedure, or the first command after the whole
			 * tuning procedure never gets a response.
			 */
			tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
			writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
		} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
			/*
			 * ESDHC_STD_TUNING_EN may be configured in the
			 * bootloader or ROM code, so clear this bit here to
			 * make sure the manual tuning can work.
			 */
			tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
			tmp &= ~ESDHC_STD_TUNING_EN;
			writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
		}

		/*
		 * On i.MX8MM, we are running a dual Linux OS setup, with the
		 * 1st Linux using the SD card as rootfs storage and the 2nd
		 * Linux using eMMC as rootfs storage. We let the 1st Linux
		 * configure power/clock for the 2nd Linux.
		 *
		 * When the 2nd Linux was booting into the rootfs stage and
		 * the 1st Linux destroyed and then restarted it, an SDHCI
		 * dump was seen. After clearing the pending interrupt and
		 * halting CQCTL, the issue is gone.
		 */
		if (cq_host) {
			tmp = cqhci_readl(cq_host, CQHCI_IS);
			cqhci_writel(cq_host, tmp, CQHCI_IS);
			cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
		}
	}
}

static void esdhc_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	u16 mode;
	int count = 10;

	/*
	 * CQE gets stuck if it sees the Buffer Read Enable bit set, which can
	 * be the case after tuning, so ensure the buffer is drained.
	 */
	reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
	while (reg & SDHCI_DATA_AVAILABLE) {
		sdhci_readl(host, SDHCI_BUFFER);
		reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
		if (count-- == 0) {
			dev_warn(mmc_dev(host->mmc),
				 "CQE may get stuck because the Buffer Read Enable bit is set\n");
			break;
		}
		mdelay(1);
	}

	/*
	 * Runtime resume will reset the entire host controller, which
	 * also clears the DMAEN/BCEN bits in the ESDHC_MIX_CTRL register.
	 * Set DMAEN and BCEN here when enabling CMDQ.
	 */
	mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;
	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode |= SDHCI_TRNS_BLK_CNT_EN;
	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);

	/*
	 * Although runtime resume resets the entire host controller, it
	 * does not affect the CQHCI side. The HALT bit needs to be cleared
	 * here to avoid CQHCI getting stuck on the first request after the
	 * system resumes.
	 */
	cqhci_writel(cq_host, 0, CQHCI_CTL);
	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		dev_err(mmc_dev(host->mmc),
			"failed to exit halt state when enable CQE\n");

	sdhci_cqe_enable(mmc);
}

static void esdhc_sdhci_dumpregs(struct mmc_host *mmc)
{
	sdhci_dumpregs(mmc_priv(mmc));
}

static const struct cqhci_host_ops esdhc_cqhci_ops = {
	.enable		= esdhc_cqe_enable,
	.disable	= sdhci_cqe_disable,
	.dumpregs	= esdhc_sdhci_dumpregs,
};

static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
			 struct sdhci_host *host,
			 struct pltfm_imx_data *imx_data)
{
	struct device_node *np = pdev->dev.of_node;
	struct esdhc_platform_data *boarddata = &imx_data->boarddata;
	int ret;

	if (of_get_property(np, "fsl,wp-controller", NULL))
		boarddata->wp_type = ESDHC_WP_CONTROLLER;

	/*
	 * If we have this property, then activate WP check.
	 * Retrieving and requesting the actual WP GPIO will happen
	 * in the call to mmc_of_parse().
	 */
	if (of_property_read_bool(np, "wp-gpios"))
		boarddata->wp_type = ESDHC_WP_GPIO;

	of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
	of_property_read_u32(np, "fsl,tuning-start-tap",
			     &boarddata->tuning_start_tap);

	of_property_read_u32(np, "fsl,strobe-dll-delay-target",
				&boarddata->strobe_dll_delay_target);
	if (of_find_property(np, "no-1-8-v", NULL))
		host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;

	if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
		boarddata->delay_line = 0;

	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);

	if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
		imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
						ESDHC_PINCTRL_STATE_100MHZ);
		imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
						ESDHC_PINCTRL_STATE_200MHZ);
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		return ret;

	if (mmc_gpio_get_cd(host->mmc) >= 0)
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	return 0;
}

static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_host *host;
	struct cqhci_host *cq_host;
	int err;
	struct pltfm_imx_data *imx_data;

	host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata,
				sizeof(*imx_data));
	if (IS_ERR(host))
		return PTR_ERR(host);

	pltfm_host = sdhci_priv(host);

	imx_data = sdhci_pltfm_priv(pltfm_host);

	imx_data->socdata = device_get_match_data(&pdev->dev);

	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
		cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);

	imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imx_data->clk_ipg)) {
		err = PTR_ERR(imx_data->clk_ipg);
		goto free_sdhci;
	}

	imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imx_data->clk_ahb)) {
		err = PTR_ERR(imx_data->clk_ahb);
		goto free_sdhci;
	}

	imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(imx_data->clk_per)) {
		err = PTR_ERR(imx_data->clk_per);
		goto free_sdhci;
	}

	pltfm_host->clk = imx_data->clk_per;
	pltfm_host->clock = clk_get_rate(pltfm_host->clk);
	err = clk_prepare_enable(imx_data->clk_per);
	if (err)
		goto free_sdhci;
	err = clk_prepare_enable(imx_data->clk_ipg);
	if (err)
		goto disable_per_clk;
	err = clk_prepare_enable(imx_data->clk_ahb);
	if (err)
		goto disable_ipg_clk;

	imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(imx_data->pinctrl))
		dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");

	if (esdhc_is_usdhc(imx_data)) {
		host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
		host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;

		/* GPIO CD can be set as a wakeup source */
		host->mmc->caps |= MMC_CAP_CD_WAKE;

		if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
			host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;

		/* clear tuning bits in case ROM has set them already */
		writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
		writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
		writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);

		/*
		 * Link the usdhc specific mmc_host_ops execute_tuning function
		 * to replace the standard one in sdhci_ops.
		 */
		host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
	}

	if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
		sdhci_esdhc_ops.platform_execute_tuning =
					esdhc_executing_tuning;

	if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
		host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;

	if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
		host->mmc->caps2 |= MMC_CAP2_HS400;

	if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
		host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;

	if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
		host->mmc->caps2 |= MMC_CAP2_HS400_ES;
		host->mmc_host_ops.hs400_enhanced_strobe =
					esdhc_hs400_enhanced_strobe;
	}

	if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
		host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
		cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
		if (!cq_host) {
			err = -ENOMEM;
			goto disable_ahb_clk;
		}

		cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET;
		cq_host->ops = &esdhc_cqhci_ops;

		err = cqhci_init(cq_host, host->mmc, false);
		if (err)
			goto disable_ahb_clk;
	}

	err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
	if (err)
		goto disable_ahb_clk;

	sdhci_esdhc_imx_hwinit(host);

	err = sdhci_add_host(host);
	if (err)
		goto disable_ahb_clk;

	/*
	 * Set up the wakeup capability here and let the user decide
	 * whether to enable this wakeup through the sysfs interface.
	sdhci_esdhc_imx_hwinit(host);

	err = sdhci_add_host(host);
	if (err)
		goto disable_ahb_clk;

	/*
	 * Set up the wakeup capability here and let the user decide
	 * whether to enable this wakeup source via the sysfs interface.
	 */
	if ((host->mmc->pm_caps & MMC_PM_KEEP_POWER) &&
	    (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ))
		device_set_wakeup_capable(&pdev->dev, true);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 1);
	pm_runtime_enable(&pdev->dev);

	return 0;

disable_ahb_clk:
	clk_disable_unprepare(imx_data->clk_ahb);
disable_ipg_clk:
	clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
	clk_disable_unprepare(imx_data->clk_per);
free_sdhci:
	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
		cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
	sdhci_pltfm_free(pdev);
	return err;
}

static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	int dead;

	pm_runtime_get_sync(&pdev->dev);
	dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	sdhci_remove_host(host, dead);

	clk_disable_unprepare(imx_data->clk_per);
	clk_disable_unprepare(imx_data->clk_ipg);
	clk_disable_unprepare(imx_data->clk_ahb);

	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
		cpu_latency_qos_remove_request(&imx_data->pm_qos_req);

	sdhci_pltfm_free(pdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sdhci_esdhc_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) &&
	    (host->tuning_mode != SDHCI_TUNING_MODE_1)) {
		mmc_retune_timer_stop(host->mmc);
		mmc_retune_needed(host->mmc);
	}

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	ret = sdhci_suspend_host(host);
	if (ret)
		return ret;

	ret = pinctrl_pm_select_sleep_state(dev);
	if (ret)
		return ret;

	ret = mmc_gpio_set_cd_wake(host->mmc, true);

	return ret;
}

static int sdhci_esdhc_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = pinctrl_pm_select_default_state(dev);
	if (ret)
		return ret;

	/* re-initialize hw state in case it's lost in low power mode */
	sdhci_esdhc_imx_hwinit(host);

	ret = sdhci_resume_host(host);
	if (ret)
		return ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE)
		ret = cqhci_resume(host->mmc);

	if (!ret)
		ret = mmc_gpio_set_cd_wake(host->mmc, false);

	return ret;
}
#endif

#ifdef CONFIG_PM
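/*
 * Runtime suspend gates all three controller clocks and drops the PM QoS
 * request (if one was added); the current card clock is saved in
 * imx_data->actual_clock so that runtime resume can restore it once the
 * clocks have been re-enabled.
 */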
static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_runtime_suspend_host(host);
	if (ret)
		return ret;

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	imx_data->actual_clock = host->mmc->actual_clock;
	esdhc_pltfm_set_clock(host, 0);
	clk_disable_unprepare(imx_data->clk_per);
	clk_disable_unprepare(imx_data->clk_ipg);
	clk_disable_unprepare(imx_data->clk_ahb);

	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
		cpu_latency_qos_remove_request(&imx_data->pm_qos_req);

	return ret;
}

static int sdhci_esdhc_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	int err;

	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
		cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);

	if (imx_data->socdata->flags & ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME)
		clk_set_rate(imx_data->clk_per, pltfm_host->clock);

	err = clk_prepare_enable(imx_data->clk_ahb);
	if (err)
		goto remove_pm_qos_request;

	err = clk_prepare_enable(imx_data->clk_per);
	if (err)
		goto disable_ahb_clk;

	err = clk_prepare_enable(imx_data->clk_ipg);
	if (err)
		goto disable_per_clk;

	esdhc_pltfm_set_clock(host, imx_data->actual_clock);

	err = sdhci_runtime_resume_host(host, 0);
	if (err)
		goto disable_ipg_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE)
		err = cqhci_resume(host->mmc);

	return err;

disable_ipg_clk:
	clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
	clk_disable_unprepare(imx_data->clk_per);
disable_ahb_clk:
	clk_disable_unprepare(imx_data->clk_ahb);
remove_pm_qos_request:
	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
		cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
	return err;
}
#endif

static const struct dev_pm_ops sdhci_esdhc_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
	SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
				sdhci_esdhc_runtime_resume, NULL)
};

static struct platform_driver sdhci_esdhc_imx_driver = {
	.driver		= {
		.name	= "sdhci-esdhc-imx",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = imx_esdhc_dt_ids,
		.pm	= &sdhci_esdhc_pmops,
	},
	.probe		= sdhci_esdhc_imx_probe,
	.remove		= sdhci_esdhc_imx_remove,
};

module_platform_driver(sdhci_esdhc_imx_driver);

MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_LICENSE("GPL v2");