xref: /openbmc/linux/drivers/mmc/host/sdhci-tegra.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2010 Google, Inc.
4  */
5 
6 #include <linux/delay.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/err.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/regulator/consumer.h>
19 #include <linux/reset.h>
20 #include <linux/mmc/card.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/mmc.h>
23 #include <linux/mmc/slot-gpio.h>
24 #include <linux/gpio/consumer.h>
25 #include <linux/ktime.h>
26 
27 #include "sdhci-pltfm.h"
28 #include "cqhci.h"
29 
30 /* Tegra SDHOST controller vendor register definitions */
31 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
32 #define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
33 #define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
34 #define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
35 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
36 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
37 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
38 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
39 
40 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
41 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
42 
43 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
44 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
45 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
46 
47 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
48 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
49 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
50 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
51 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
52 #define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
53 
54 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
55 #define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
56 
57 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
58 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
59 
60 #define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
61 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
62 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
63 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
64 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
65 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
66 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
67 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
68 #define TRIES_128					2
69 #define TRIES_256					4
70 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
71 
72 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
73 #define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
74 #define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
75 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
76 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
77 #define TUNING_WORD_BIT_SIZE				32
78 
79 #define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
80 #define SDHCI_AUTO_CAL_START				BIT(31)
81 #define SDHCI_AUTO_CAL_ENABLE				BIT(29)
82 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
83 
84 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
85 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
86 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
87 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
88 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
89 
90 #define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
91 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
92 
93 #define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
94 #define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
95 #define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
96 #define NVQUIRK_ENABLE_SDR50				BIT(3)
97 #define NVQUIRK_ENABLE_SDR104				BIT(4)
98 #define NVQUIRK_ENABLE_DDR50				BIT(5)
99 #define NVQUIRK_HAS_PADCALIB				BIT(6)
100 #define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
101 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
102 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
103 
104 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
105 #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
106 
107 struct sdhci_tegra_soc_data {
108 	const struct sdhci_pltfm_data *pdata;
109 	u64 dma_mask;
110 	u32 nvquirks;
111 	u8 min_tap_delay;
112 	u8 max_tap_delay;
113 };
114 
115 /* Magic pull up and pull down pad calibration offsets */
116 struct sdhci_tegra_autocal_offsets {
117 	u32 pull_up_3v3;
118 	u32 pull_down_3v3;
119 	u32 pull_up_3v3_timeout;
120 	u32 pull_down_3v3_timeout;
121 	u32 pull_up_1v8;
122 	u32 pull_down_1v8;
123 	u32 pull_up_1v8_timeout;
124 	u32 pull_down_1v8_timeout;
125 	u32 pull_up_sdr104;
126 	u32 pull_down_sdr104;
127 	u32 pull_up_hs400;
128 	u32 pull_down_hs400;
129 };
130 
131 struct sdhci_tegra {
132 	const struct sdhci_tegra_soc_data *soc_data;
133 	struct gpio_desc *power_gpio;
134 	bool ddr_signaling;
135 	bool pad_calib_required;
136 	bool pad_control_available;
137 
138 	struct reset_control *rst;
139 	struct pinctrl *pinctrl_sdmmc;
140 	struct pinctrl_state *pinctrl_state_3v3;
141 	struct pinctrl_state *pinctrl_state_1v8;
142 	struct pinctrl_state *pinctrl_state_3v3_drv;
143 	struct pinctrl_state *pinctrl_state_1v8_drv;
144 
145 	struct sdhci_tegra_autocal_offsets autocal_offsets;
146 	ktime_t last_calib;
147 
148 	u32 default_tap;
149 	u32 default_trim;
150 	u32 dqs_trim;
151 	bool enable_hwcq;
152 	unsigned long curr_clk_rate;
153 	u8 tuned_tap_delay;
154 };
155 
156 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
157 {
158 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
159 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
160 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
161 
162 	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
163 			(reg == SDHCI_HOST_VERSION))) {
164 		/* Erratum: Version register is invalid in HW. */
165 		return SDHCI_SPEC_200;
166 	}
167 
168 	return readw(host->ioaddr + reg);
169 }
170 
171 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
172 {
173 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
174 
175 	switch (reg) {
176 	case SDHCI_TRANSFER_MODE:
177 		/*
178 		 * Postpone this write, we must do it together with a
179 		 * command write that is down below.
180 		 */
181 		pltfm_host->xfer_mode_shadow = val;
182 		return;
183 	case SDHCI_COMMAND:
184 		writel((val << 16) | pltfm_host->xfer_mode_shadow,
185 			host->ioaddr + SDHCI_TRANSFER_MODE);
186 		return;
187 	}
188 
189 	writew(val, host->ioaddr + reg);
190 }
191 
192 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
193 {
194 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
195 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
196 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
197 
198 	/* Seems like we're getting spurious timeout and crc errors, so
199 	 * disable signalling of them. In case of real errors software
200 	 * timers should take care of eventually detecting them.
201 	 */
202 	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
203 		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
204 
205 	writel(val, host->ioaddr + reg);
206 
207 	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
208 			(reg == SDHCI_INT_ENABLE))) {
209 		/* Erratum: Must enable block gap interrupt detection */
210 		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
211 		if (val & SDHCI_INT_CARD_INT)
212 			gap_ctrl |= 0x8;
213 		else
214 			gap_ctrl &= ~0x8;
215 		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
216 	}
217 }
218 
219 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
220 {
221 	bool status;
222 	u32 reg;
223 
224 	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
225 	status = !!(reg & SDHCI_CLOCK_CARD_EN);
226 
227 	if (status == enable)
228 		return status;
229 
230 	if (enable)
231 		reg |= SDHCI_CLOCK_CARD_EN;
232 	else
233 		reg &= ~SDHCI_CLOCK_CARD_EN;
234 
235 	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
236 
237 	return status;
238 }
239 
240 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
241 {
242 	bool is_tuning_cmd = 0;
243 	bool clk_enabled;
244 	u8 cmd;
245 
246 	if (reg == SDHCI_COMMAND) {
247 		cmd = SDHCI_GET_CMD(val);
248 		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
249 				cmd == MMC_SEND_TUNING_BLOCK_HS200;
250 	}
251 
252 	if (is_tuning_cmd)
253 		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
254 
255 	writew(val, host->ioaddr + reg);
256 
257 	if (is_tuning_cmd) {
258 		udelay(1);
259 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
260 		tegra_sdhci_configure_card_clk(host, clk_enabled);
261 	}
262 }
263 
264 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
265 {
266 	/*
267 	 * Write-enable shall be assumed if GPIO is missing in a board's
268 	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
269 	 * Tegra.
270 	 */
271 	return mmc_gpio_get_ro(host->mmc);
272 }
273 
274 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
275 {
276 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
277 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
278 	int has_1v8, has_3v3;
279 
280 	/*
281 	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
282 	 * voltage configuration in order to perform voltage switching. This
283 	 * means that valid pinctrl info is required on SDHCI instances capable
284 	 * of performing voltage switching. Whether or not an SDHCI instance is
285 	 * capable of voltage switching is determined based on the regulator.
286 	 */
287 
288 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
289 		return true;
290 
291 	if (IS_ERR(host->mmc->supply.vqmmc))
292 		return false;
293 
294 	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
295 						 1700000, 1950000);
296 
297 	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
298 						 2700000, 3600000);
299 
300 	if (has_1v8 == 1 && has_3v3 == 1)
301 		return tegra_host->pad_control_available;
302 
303 	/* Fixed voltage, no pad control required. */
304 	return true;
305 }
306 
307 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
308 {
309 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
310 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
311 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
312 	bool card_clk_enabled = false;
313 	u32 reg;
314 
315 	/*
316 	 * Touching the tap values is a bit tricky on some SoC generations.
317 	 * The quirk enables a workaround for a glitch that sometimes occurs if
318 	 * the tap values are changed.
319 	 */
320 
321 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
322 		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
323 
324 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
325 	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
326 	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
327 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
328 
329 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
330 	    card_clk_enabled) {
331 		udelay(1);
332 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
333 		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
334 	}
335 }
336 
337 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
338 					      struct mmc_ios *ios)
339 {
340 	struct sdhci_host *host = mmc_priv(mmc);
341 	u32 val;
342 
343 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
344 
345 	if (ios->enhanced_strobe)
346 		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
347 	else
348 		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
349 
350 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
351 
352 }
353 
354 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
355 {
356 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
357 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
358 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
359 	u32 misc_ctrl, clk_ctrl, pad_ctrl;
360 
361 	sdhci_reset(host, mask);
362 
363 	if (!(mask & SDHCI_RESET_ALL))
364 		return;
365 
366 	tegra_sdhci_set_tap(host, tegra_host->default_tap);
367 
368 	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
369 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
370 
371 	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
372 		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
373 		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
374 		       SDHCI_MISC_CTRL_ENABLE_SDR104);
375 
376 	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
377 		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
378 
379 	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
380 		/* Erratum: Enable SDHCI spec v3.00 support */
381 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
382 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
383 		/* Advertise UHS modes as supported by host */
384 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
385 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
386 		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
387 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
388 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
389 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
390 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
391 			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
392 	}
393 
394 	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
395 
396 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
397 	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
398 
399 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
400 		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
401 		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
402 		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
403 		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
404 
405 		tegra_host->pad_calib_required = true;
406 	}
407 
408 	tegra_host->ddr_signaling = false;
409 }
410 
411 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
412 {
413 	u32 val;
414 
415 	/*
416 	 * Enable or disable the additional I/O pad used by the drive strength
417 	 * calibration process.
418 	 */
419 	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
420 
421 	if (enable)
422 		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
423 	else
424 		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
425 
426 	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
427 
428 	if (enable)
429 		usleep_range(1, 2);
430 }
431 
432 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
433 					       u16 pdpu)
434 {
435 	u32 reg;
436 
437 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
438 	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
439 	reg |= pdpu;
440 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
441 }
442 
443 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
444 				   bool state_drvupdn)
445 {
446 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
447 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
448 	struct sdhci_tegra_autocal_offsets *offsets =
449 						&tegra_host->autocal_offsets;
450 	struct pinctrl_state *pinctrl_drvupdn = NULL;
451 	int ret = 0;
452 	u8 drvup = 0, drvdn = 0;
453 	u32 reg;
454 
455 	if (!state_drvupdn) {
456 		/* PADS Drive Strength */
457 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
458 			if (tegra_host->pinctrl_state_1v8_drv) {
459 				pinctrl_drvupdn =
460 					tegra_host->pinctrl_state_1v8_drv;
461 			} else {
462 				drvup = offsets->pull_up_1v8_timeout;
463 				drvdn = offsets->pull_down_1v8_timeout;
464 			}
465 		} else {
466 			if (tegra_host->pinctrl_state_3v3_drv) {
467 				pinctrl_drvupdn =
468 					tegra_host->pinctrl_state_3v3_drv;
469 			} else {
470 				drvup = offsets->pull_up_3v3_timeout;
471 				drvdn = offsets->pull_down_3v3_timeout;
472 			}
473 		}
474 
475 		if (pinctrl_drvupdn != NULL) {
476 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
477 							pinctrl_drvupdn);
478 			if (ret < 0)
479 				dev_err(mmc_dev(host->mmc),
480 					"failed pads drvupdn, ret: %d\n", ret);
481 		} else if ((drvup) || (drvdn)) {
482 			reg = sdhci_readl(host,
483 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
484 			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
485 			reg |= (drvup << 20) | (drvdn << 12);
486 			sdhci_writel(host, reg,
487 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
488 		}
489 
490 	} else {
491 		/* Dual Voltage PADS Voltage selection */
492 		if (!tegra_host->pad_control_available)
493 			return 0;
494 
495 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
496 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
497 						tegra_host->pinctrl_state_1v8);
498 			if (ret < 0)
499 				dev_err(mmc_dev(host->mmc),
500 					"setting 1.8V failed, ret: %d\n", ret);
501 		} else {
502 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
503 						tegra_host->pinctrl_state_3v3);
504 			if (ret < 0)
505 				dev_err(mmc_dev(host->mmc),
506 					"setting 3.3V failed, ret: %d\n", ret);
507 		}
508 	}
509 
510 	return ret;
511 }
512 
513 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
514 {
515 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
516 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
517 	struct sdhci_tegra_autocal_offsets offsets =
518 			tegra_host->autocal_offsets;
519 	struct mmc_ios *ios = &host->mmc->ios;
520 	bool card_clk_enabled;
521 	u16 pdpu;
522 	u32 reg;
523 	int ret;
524 
525 	switch (ios->timing) {
526 	case MMC_TIMING_UHS_SDR104:
527 		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
528 		break;
529 	case MMC_TIMING_MMC_HS400:
530 		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
531 		break;
532 	default:
533 		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
534 			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
535 		else
536 			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
537 	}
538 
539 	/* Set initial offset before auto-calibration */
540 	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
541 
542 	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
543 
544 	tegra_sdhci_configure_cal_pad(host, true);
545 
546 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
547 	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
548 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
549 
550 	usleep_range(1, 2);
551 	/* 10 ms timeout */
552 	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
553 				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
554 				 1000, 10000);
555 
556 	tegra_sdhci_configure_cal_pad(host, false);
557 
558 	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
559 
560 	if (ret) {
561 		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
562 
563 		/* Disable automatic cal and use fixed Drive Strengths */
564 		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
565 		reg &= ~SDHCI_AUTO_CAL_ENABLE;
566 		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
567 
568 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
569 		if (ret < 0)
570 			dev_err(mmc_dev(host->mmc),
571 				"Setting drive strengths failed: %d\n", ret);
572 	}
573 }
574 
575 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
576 {
577 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
578 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
579 	struct sdhci_tegra_autocal_offsets *autocal =
580 			&tegra_host->autocal_offsets;
581 	int err;
582 
583 	err = device_property_read_u32(host->mmc->parent,
584 			"nvidia,pad-autocal-pull-up-offset-3v3",
585 			&autocal->pull_up_3v3);
586 	if (err)
587 		autocal->pull_up_3v3 = 0;
588 
589 	err = device_property_read_u32(host->mmc->parent,
590 			"nvidia,pad-autocal-pull-down-offset-3v3",
591 			&autocal->pull_down_3v3);
592 	if (err)
593 		autocal->pull_down_3v3 = 0;
594 
595 	err = device_property_read_u32(host->mmc->parent,
596 			"nvidia,pad-autocal-pull-up-offset-1v8",
597 			&autocal->pull_up_1v8);
598 	if (err)
599 		autocal->pull_up_1v8 = 0;
600 
601 	err = device_property_read_u32(host->mmc->parent,
602 			"nvidia,pad-autocal-pull-down-offset-1v8",
603 			&autocal->pull_down_1v8);
604 	if (err)
605 		autocal->pull_down_1v8 = 0;
606 
607 	err = device_property_read_u32(host->mmc->parent,
608 			"nvidia,pad-autocal-pull-up-offset-sdr104",
609 			&autocal->pull_up_sdr104);
610 	if (err)
611 		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
612 
613 	err = device_property_read_u32(host->mmc->parent,
614 			"nvidia,pad-autocal-pull-down-offset-sdr104",
615 			&autocal->pull_down_sdr104);
616 	if (err)
617 		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
618 
619 	err = device_property_read_u32(host->mmc->parent,
620 			"nvidia,pad-autocal-pull-up-offset-hs400",
621 			&autocal->pull_up_hs400);
622 	if (err)
623 		autocal->pull_up_hs400 = autocal->pull_up_1v8;
624 
625 	err = device_property_read_u32(host->mmc->parent,
626 			"nvidia,pad-autocal-pull-down-offset-hs400",
627 			&autocal->pull_down_hs400);
628 	if (err)
629 		autocal->pull_down_hs400 = autocal->pull_down_1v8;
630 
631 	/*
632 	 * Different fail-safe drive strength values based on the signaling
633 	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
634 	 * So, avoid reading below device tree properties for SoCs that don't
635 	 * have NVQUIRK_NEEDS_PAD_CONTROL.
636 	 */
637 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
638 		return;
639 
640 	err = device_property_read_u32(host->mmc->parent,
641 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
642 			&autocal->pull_up_3v3_timeout);
643 	if (err) {
644 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
645 			(tegra_host->pinctrl_state_3v3_drv == NULL))
646 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
647 				mmc_hostname(host->mmc));
648 		autocal->pull_up_3v3_timeout = 0;
649 	}
650 
651 	err = device_property_read_u32(host->mmc->parent,
652 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
653 			&autocal->pull_down_3v3_timeout);
654 	if (err) {
655 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
656 			(tegra_host->pinctrl_state_3v3_drv == NULL))
657 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
658 				mmc_hostname(host->mmc));
659 		autocal->pull_down_3v3_timeout = 0;
660 	}
661 
662 	err = device_property_read_u32(host->mmc->parent,
663 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
664 			&autocal->pull_up_1v8_timeout);
665 	if (err) {
666 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
667 			(tegra_host->pinctrl_state_1v8_drv == NULL))
668 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
669 				mmc_hostname(host->mmc));
670 		autocal->pull_up_1v8_timeout = 0;
671 	}
672 
673 	err = device_property_read_u32(host->mmc->parent,
674 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
675 			&autocal->pull_down_1v8_timeout);
676 	if (err) {
677 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
678 			(tegra_host->pinctrl_state_1v8_drv == NULL))
679 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
680 				mmc_hostname(host->mmc));
681 		autocal->pull_down_1v8_timeout = 0;
682 	}
683 }
684 
685 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
686 {
687 	struct sdhci_host *host = mmc_priv(mmc);
688 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
689 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
690 	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
691 
692 	/* 100 ms calibration interval is specified in the TRM */
693 	if (ktime_to_ms(since_calib) > 100) {
694 		tegra_sdhci_pad_autocalib(host);
695 		tegra_host->last_calib = ktime_get();
696 	}
697 
698 	sdhci_request(mmc, mrq);
699 }
700 
701 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
702 {
703 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
704 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
705 	int err;
706 
707 	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
708 				       &tegra_host->default_tap);
709 	if (err)
710 		tegra_host->default_tap = 0;
711 
712 	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
713 				       &tegra_host->default_trim);
714 	if (err)
715 		tegra_host->default_trim = 0;
716 
717 	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
718 				       &tegra_host->dqs_trim);
719 	if (err)
720 		tegra_host->dqs_trim = 0x11;
721 }
722 
723 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
724 {
725 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
726 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
727 
728 	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
729 		tegra_host->enable_hwcq = true;
730 	else
731 		tegra_host->enable_hwcq = false;
732 
733 	tegra_sdhci_parse_pad_autocal_dt(host);
734 	tegra_sdhci_parse_tap_and_trim(host);
735 }
736 
737 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
738 {
739 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
740 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
741 	unsigned long host_clk;
742 
743 	if (!clock)
744 		return sdhci_set_clock(host, clock);
745 
746 	/*
747 	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
748 	 * divider to be configured to divided the host clock by two. The SDHCI
749 	 * clock divider is calculated as part of sdhci_set_clock() by
750 	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
751 	 * the requested clock rate.
752 	 *
753 	 * By setting the host->max_clk to clock * 2 the divider calculation
754 	 * will always result in the correct value for DDR50/52 modes,
755 	 * regardless of clock rate rounding, which may happen if the value
756 	 * from clk_get_rate() is used.
757 	 */
758 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
759 	clk_set_rate(pltfm_host->clk, host_clk);
760 	tegra_host->curr_clk_rate = host_clk;
761 	if (tegra_host->ddr_signaling)
762 		host->max_clk = host_clk;
763 	else
764 		host->max_clk = clk_get_rate(pltfm_host->clk);
765 
766 	sdhci_set_clock(host, clock);
767 
768 	if (tegra_host->pad_calib_required) {
769 		tegra_sdhci_pad_autocalib(host);
770 		tegra_host->pad_calib_required = false;
771 	}
772 }
773 
774 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
775 {
776 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
777 
778 	return clk_round_rate(pltfm_host->clk, UINT_MAX);
779 }
780 
781 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
782 {
783 	u32 val;
784 
785 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
786 	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
787 	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
788 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
789 }
790 
791 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
792 {
793 	u32 reg;
794 	int err;
795 
796 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
797 	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
798 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
799 
800 	/* 1 ms sleep, 5 ms timeout */
801 	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
802 				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
803 				 1000, 5000);
804 	if (err)
805 		dev_err(mmc_dev(host->mmc),
806 			"HS400 delay line calibration timed out\n");
807 }
808 
809 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
810 				       u8 thd_low, u8 fixed_tap)
811 {
812 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
813 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
814 	u32 val, tun_status;
815 	u8 word, bit, edge1, tap, window;
816 	bool tap_result;
817 	bool start_fail = false;
818 	bool start_pass = false;
819 	bool end_pass = false;
820 	bool first_fail = false;
821 	bool first_pass = false;
822 	u8 start_pass_tap = 0;
823 	u8 end_pass_tap = 0;
824 	u8 first_fail_tap = 0;
825 	u8 first_pass_tap = 0;
826 	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
827 
828 	/*
829 	 * Read auto-tuned results and extract good valid passing window by
830 	 * filtering out un-wanted bubble/partial/merged windows.
831 	 */
832 	for (word = 0; word < total_tuning_words; word++) {
833 		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
834 		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
835 		val |= word;
836 		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
837 		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
838 		bit = 0;
839 		while (bit < TUNING_WORD_BIT_SIZE) {
840 			tap = word * TUNING_WORD_BIT_SIZE + bit;
841 			tap_result = tun_status & (1 << bit);
842 			if (!tap_result && !start_fail) {
843 				start_fail = true;
844 				if (!first_fail) {
845 					first_fail_tap = tap;
846 					first_fail = true;
847 				}
848 
849 			} else if (tap_result && start_fail && !start_pass) {
850 				start_pass_tap = tap;
851 				start_pass = true;
852 				if (!first_pass) {
853 					first_pass_tap = tap;
854 					first_pass = true;
855 				}
856 
857 			} else if (!tap_result && start_fail && start_pass &&
858 				   !end_pass) {
859 				end_pass_tap = tap - 1;
860 				end_pass = true;
861 			} else if (tap_result && start_pass && start_fail &&
862 				   end_pass) {
863 				window = end_pass_tap - start_pass_tap;
864 				/* discard merged window and bubble window */
865 				if (window >= thd_up || window < thd_low) {
866 					start_pass_tap = tap;
867 					end_pass = false;
868 				} else {
869 					/* set tap at middle of valid window */
870 					tap = start_pass_tap + window / 2;
871 					tegra_host->tuned_tap_delay = tap;
872 					return;
873 				}
874 			}
875 
876 			bit++;
877 		}
878 	}
879 
880 	if (!first_fail) {
881 		WARN(1, "no edge detected, continue with hw tuned delay.\n");
882 	} else if (first_pass) {
883 		/* set tap location at fixed tap relative to the first edge */
884 		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
885 		if (edge1 - 1 > fixed_tap)
886 			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
887 		else
888 			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
889 	}
890 }
891 
892 static void tegra_sdhci_post_tuning(struct sdhci_host *host)
893 {
894 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
895 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
896 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
897 	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
898 	u8 fixed_tap, start_tap, end_tap, window_width;
899 	u8 thdupper, thdlower;
900 	u8 num_iter;
901 	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
902 
903 	/* retain HW tuned tap to use incase if no correction is needed */
904 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
905 	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
906 				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
907 	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
908 		min_tap_dly = soc_data->min_tap_delay;
909 		max_tap_dly = soc_data->max_tap_delay;
910 		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
911 		period_ps = USEC_PER_SEC / clk_rate_mhz;
912 		bestcase = period_ps / min_tap_dly;
913 		worstcase = period_ps / max_tap_dly;
914 		/*
915 		 * Upper and Lower bound thresholds used to detect merged and
916 		 * bubble windows
917 		 */
918 		thdupper = (2 * worstcase + bestcase) / 2;
919 		thdlower = worstcase / 4;
920 		/*
921 		 * fixed tap is used when HW tuning result contains single edge
922 		 * and tap is set at fixed tap delay relative to the first edge
923 		 */
924 		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
925 		fixed_tap = avg_tap_dly / 2;
926 
927 		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
928 		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
929 		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
930 			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
931 		window_width = end_tap - start_tap;
932 		num_iter = host->tuning_loop_count;
933 		/*
934 		 * partial window includes edges of the tuning range.
935 		 * merged window includes more taps so window width is higher
936 		 * than upper threshold.
937 		 */
938 		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
939 		    (end_tap == num_iter - 2) || window_width >= thdupper) {
940 			pr_debug("%s: Apply tuning correction\n",
941 				 mmc_hostname(host->mmc));
942 			tegra_sdhci_tap_correction(host, thdupper, thdlower,
943 						   fixed_tap);
944 		}
945 	}
946 
947 	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
948 }
949 
950 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
951 {
952 	struct sdhci_host *host = mmc_priv(mmc);
953 	int err;
954 
955 	err = sdhci_execute_tuning(mmc, opcode);
956 	if (!err && !host->tuning_err)
957 		tegra_sdhci_post_tuning(host);
958 
959 	return err;
960 }
961 
962 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
963 					  unsigned timing)
964 {
965 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
966 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
967 	bool set_default_tap = false;
968 	bool set_dqs_trim = false;
969 	bool do_hs400_dll_cal = false;
970 	u8 iter = TRIES_256;
971 	u32 val;
972 
973 	tegra_host->ddr_signaling = false;
974 	switch (timing) {
975 	case MMC_TIMING_UHS_SDR50:
976 		break;
977 	case MMC_TIMING_UHS_SDR104:
978 	case MMC_TIMING_MMC_HS200:
979 		/* Don't set default tap on tunable modes. */
980 		iter = TRIES_128;
981 		break;
982 	case MMC_TIMING_MMC_HS400:
983 		set_dqs_trim = true;
984 		do_hs400_dll_cal = true;
985 		iter = TRIES_128;
986 		break;
987 	case MMC_TIMING_MMC_DDR52:
988 	case MMC_TIMING_UHS_DDR50:
989 		tegra_host->ddr_signaling = true;
990 		set_default_tap = true;
991 		break;
992 	default:
993 		set_default_tap = true;
994 		break;
995 	}
996 
997 	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
998 	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
999 		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1000 		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1001 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1002 		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1003 		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1004 	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1005 	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1006 
1007 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1008 
1009 	sdhci_set_uhs_signaling(host, timing);
1010 
1011 	tegra_sdhci_pad_autocalib(host);
1012 
1013 	if (tegra_host->tuned_tap_delay && !set_default_tap)
1014 		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1015 	else
1016 		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1017 
1018 	if (set_dqs_trim)
1019 		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1020 
1021 	if (do_hs400_dll_cal)
1022 		tegra_sdhci_hs400_dll_cal(host);
1023 }
1024 
1025 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1026 {
1027 	unsigned int min, max;
1028 
1029 	/*
1030 	 * Start search for minimum tap value at 10, as smaller values are
1031 	 * may wrongly be reported as working but fail at higher speeds,
1032 	 * according to the TRM.
1033 	 */
1034 	min = 10;
1035 	while (min < 255) {
1036 		tegra_sdhci_set_tap(host, min);
1037 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1038 			break;
1039 		min++;
1040 	}
1041 
1042 	/* Find the maximum tap value that still passes. */
1043 	max = min + 1;
1044 	while (max < 255) {
1045 		tegra_sdhci_set_tap(host, max);
1046 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1047 			max--;
1048 			break;
1049 		}
1050 		max++;
1051 	}
1052 
1053 	/* The TRM states the ideal tap value is at 75% in the passing range. */
1054 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1055 
1056 	return mmc_send_tuning(host->mmc, opcode, NULL);
1057 }
1058 
1059 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1060 						   struct mmc_ios *ios)
1061 {
1062 	struct sdhci_host *host = mmc_priv(mmc);
1063 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1064 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1065 	int ret = 0;
1066 
1067 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1068 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1069 		if (ret < 0)
1070 			return ret;
1071 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1072 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1073 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1074 		if (ret < 0)
1075 			return ret;
1076 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1077 	}
1078 
1079 	if (tegra_host->pad_calib_required)
1080 		tegra_sdhci_pad_autocalib(host);
1081 
1082 	return ret;
1083 }
1084 
1085 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1086 					 struct sdhci_tegra *tegra_host)
1087 {
1088 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1089 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1090 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1091 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1092 		return -1;
1093 	}
1094 
1095 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1096 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1097 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1098 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1099 			tegra_host->pinctrl_state_1v8_drv = NULL;
1100 	}
1101 
1102 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1103 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1104 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1105 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1106 			tegra_host->pinctrl_state_3v3_drv = NULL;
1107 	}
1108 
1109 	tegra_host->pinctrl_state_3v3 =
1110 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1111 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1112 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1113 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1114 		return -1;
1115 	}
1116 
1117 	tegra_host->pinctrl_state_1v8 =
1118 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1119 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1120 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1121 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1122 		return -1;
1123 	}
1124 
1125 	tegra_host->pad_control_available = true;
1126 
1127 	return 0;
1128 }
1129 
1130 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1131 {
1132 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1133 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1134 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1135 
1136 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1137 		tegra_host->pad_calib_required = true;
1138 }
1139 
1140 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1141 {
1142 	struct mmc_host *mmc = cq_host->mmc;
1143 	u8 ctrl;
1144 	ktime_t timeout;
1145 	bool timed_out;
1146 
1147 	/*
1148 	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
1149 	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
1150 	 * to be re-configured.
1151 	 * Tegra CQHCI/SDHCI prevents write access to block size register when
1152 	 * CQE is unhalted. So handling CQE resume sequence here to configure
1153 	 * SDHCI block registers prior to exiting CQE halt state.
1154 	 */
1155 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1156 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1157 		sdhci_cqe_enable(mmc);
1158 		writel(val, cq_host->mmio + reg);
1159 		timeout = ktime_add_us(ktime_get(), 50);
1160 		while (1) {
1161 			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1162 			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1163 			if (!(ctrl & CQHCI_HALT) || timed_out)
1164 				break;
1165 		}
1166 		/*
1167 		 * CQE usually resumes very quick, but incase if Tegra CQE
1168 		 * doesn't resume retry unhalt.
1169 		 */
1170 		if (timed_out)
1171 			writel(val, cq_host->mmio + reg);
1172 	} else {
1173 		writel(val, cq_host->mmio + reg);
1174 	}
1175 }
1176 
1177 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1178 					 struct mmc_request *mrq, u64 *data)
1179 {
1180 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1181 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1182 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1183 
1184 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1185 	    mrq->cmd->flags & MMC_RSP_R1B)
1186 		*data |= CQHCI_CMD_TIMING(1);
1187 }
1188 
1189 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1190 {
1191 	struct cqhci_host *cq_host = mmc->cqe_private;
1192 	u32 val;
1193 
1194 	/*
1195 	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1196 	 * register when CQE is enabled and unhalted.
1197 	 * CQHCI driver enables CQE prior to activation, so disable CQE before
1198 	 * programming block size in sdhci controller and enable it back.
1199 	 */
1200 	if (!cq_host->activated) {
1201 		val = cqhci_readl(cq_host, CQHCI_CFG);
1202 		if (val & CQHCI_ENABLE)
1203 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1204 				     CQHCI_CFG);
1205 		sdhci_cqe_enable(mmc);
1206 		if (val & CQHCI_ENABLE)
1207 			cqhci_writel(cq_host, val, CQHCI_CFG);
1208 	}
1209 
1210 	/*
1211 	 * CMD CRC errors are seen sometimes with some eMMC devices when status
1212 	 * command is sent during transfer of last data block which is the
1213 	 * default case as send status command block counter (CBC) is 1.
1214 	 * Recommended fix to set CBC to 0 allowing send status command only
1215 	 * when data lines are idle.
1216 	 */
1217 	val = cqhci_readl(cq_host, CQHCI_SSC1);
1218 	val &= ~CQHCI_SSC1_CBC_MASK;
1219 	cqhci_writel(cq_host, val, CQHCI_SSC1);
1220 }
1221 
1222 static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1223 {
1224 	sdhci_dumpregs(mmc_priv(mmc));
1225 }
1226 
1227 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1228 {
1229 	int cmd_error = 0;
1230 	int data_error = 0;
1231 
1232 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1233 		return intmask;
1234 
1235 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1236 
1237 	return 0;
1238 }
1239 
1240 static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1241 				    struct mmc_command *cmd)
1242 {
1243 	u32 val;
1244 
1245 	/*
1246 	 * HW busy detection timeout is based on programmed data timeout
1247 	 * counter and maximum supported timeout is 11s which may not be
1248 	 * enough for long operations like cache flush, sleep awake, erase.
1249 	 *
1250 	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1251 	 * host controller to wait for busy state until the card is busy
1252 	 * without HW timeout.
1253 	 *
1254 	 * So, use infinite busy wait mode for operations that may take
1255 	 * more than maximum HW busy timeout of 11s otherwise use finite
1256 	 * busy wait mode.
1257 	 */
1258 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1259 	if (cmd && cmd->busy_timeout >= 11 * HZ)
1260 		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1261 	else
1262 		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1263 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1264 
1265 	__sdhci_set_timeout(host, cmd);
1266 }
1267 
1268 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1269 	.write_l    = tegra_cqhci_writel,
1270 	.enable	= sdhci_tegra_cqe_enable,
1271 	.disable = sdhci_cqe_disable,
1272 	.dumpregs = sdhci_tegra_dumpregs,
1273 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1274 };
1275 
1276 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1277 {
1278 	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1279 	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1280 	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1281 	struct device *dev = mmc_dev(host->mmc);
1282 
1283 	if (soc->dma_mask)
1284 		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1285 
1286 	return 0;
1287 }
1288 
1289 static const struct sdhci_ops tegra_sdhci_ops = {
1290 	.get_ro     = tegra_sdhci_get_ro,
1291 	.read_w     = tegra_sdhci_readw,
1292 	.write_l    = tegra_sdhci_writel,
1293 	.set_clock  = tegra_sdhci_set_clock,
1294 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1295 	.set_bus_width = sdhci_set_bus_width,
1296 	.reset      = tegra_sdhci_reset,
1297 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1298 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1299 	.voltage_switch = tegra_sdhci_voltage_switch,
1300 	.get_max_clock = tegra_sdhci_get_max_clock,
1301 };
1302 
1303 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1304 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1305 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1306 		  SDHCI_QUIRK_NO_HISPD_BIT |
1307 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1308 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1309 	.ops  = &tegra_sdhci_ops,
1310 };
1311 
1312 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1313 	.pdata = &sdhci_tegra20_pdata,
1314 	.dma_mask = DMA_BIT_MASK(32),
1315 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1316 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1317 };
1318 
1319 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1320 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1321 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1322 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1323 		  SDHCI_QUIRK_NO_HISPD_BIT |
1324 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1325 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1326 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1327 		   SDHCI_QUIRK2_BROKEN_HS200 |
1328 		   /*
1329 		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1330 		    * though no command operation was in progress."
1331 		    *
1332 		    * The exact reason is unknown, as the same hardware seems
1333 		    * to support Auto CMD23 on a downstream 3.1 kernel.
1334 		    */
1335 		   SDHCI_QUIRK2_ACMD23_BROKEN,
1336 	.ops  = &tegra_sdhci_ops,
1337 };
1338 
1339 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1340 	.pdata = &sdhci_tegra30_pdata,
1341 	.dma_mask = DMA_BIT_MASK(32),
1342 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1343 		    NVQUIRK_ENABLE_SDR50 |
1344 		    NVQUIRK_ENABLE_SDR104 |
1345 		    NVQUIRK_HAS_PADCALIB,
1346 };
1347 
1348 static const struct sdhci_ops tegra114_sdhci_ops = {
1349 	.get_ro     = tegra_sdhci_get_ro,
1350 	.read_w     = tegra_sdhci_readw,
1351 	.write_w    = tegra_sdhci_writew,
1352 	.write_l    = tegra_sdhci_writel,
1353 	.set_clock  = tegra_sdhci_set_clock,
1354 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1355 	.set_bus_width = sdhci_set_bus_width,
1356 	.reset      = tegra_sdhci_reset,
1357 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1358 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1359 	.voltage_switch = tegra_sdhci_voltage_switch,
1360 	.get_max_clock = tegra_sdhci_get_max_clock,
1361 };
1362 
1363 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1364 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1365 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1366 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1367 		  SDHCI_QUIRK_NO_HISPD_BIT |
1368 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1369 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1370 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1371 	.ops  = &tegra114_sdhci_ops,
1372 };
1373 
1374 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1375 	.pdata = &sdhci_tegra114_pdata,
1376 	.dma_mask = DMA_BIT_MASK(32),
1377 };
1378 
1379 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1380 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1381 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1382 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1383 		  SDHCI_QUIRK_NO_HISPD_BIT |
1384 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1385 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1386 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1387 	.ops  = &tegra114_sdhci_ops,
1388 };
1389 
1390 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1391 	.pdata = &sdhci_tegra124_pdata,
1392 	.dma_mask = DMA_BIT_MASK(34),
1393 };
1394 
1395 static const struct sdhci_ops tegra210_sdhci_ops = {
1396 	.get_ro     = tegra_sdhci_get_ro,
1397 	.read_w     = tegra_sdhci_readw,
1398 	.write_w    = tegra210_sdhci_writew,
1399 	.write_l    = tegra_sdhci_writel,
1400 	.set_clock  = tegra_sdhci_set_clock,
1401 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1402 	.set_bus_width = sdhci_set_bus_width,
1403 	.reset      = tegra_sdhci_reset,
1404 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1405 	.voltage_switch = tegra_sdhci_voltage_switch,
1406 	.get_max_clock = tegra_sdhci_get_max_clock,
1407 	.set_timeout = tegra_sdhci_set_timeout,
1408 };
1409 
1410 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1411 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1412 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1413 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1414 		  SDHCI_QUIRK_NO_HISPD_BIT |
1415 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1416 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1417 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1418 	.ops  = &tegra210_sdhci_ops,
1419 };
1420 
1421 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1422 	.pdata = &sdhci_tegra210_pdata,
1423 	.dma_mask = DMA_BIT_MASK(34),
1424 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1425 		    NVQUIRK_HAS_PADCALIB |
1426 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1427 		    NVQUIRK_ENABLE_SDR50 |
1428 		    NVQUIRK_ENABLE_SDR104,
1429 	.min_tap_delay = 106,
1430 	.max_tap_delay = 185,
1431 };
1432 
1433 static const struct sdhci_ops tegra186_sdhci_ops = {
1434 	.get_ro     = tegra_sdhci_get_ro,
1435 	.read_w     = tegra_sdhci_readw,
1436 	.write_l    = tegra_sdhci_writel,
1437 	.set_clock  = tegra_sdhci_set_clock,
1438 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1439 	.set_bus_width = sdhci_set_bus_width,
1440 	.reset      = tegra_sdhci_reset,
1441 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1442 	.voltage_switch = tegra_sdhci_voltage_switch,
1443 	.get_max_clock = tegra_sdhci_get_max_clock,
1444 	.irq = sdhci_tegra_cqhci_irq,
1445 	.set_timeout = tegra_sdhci_set_timeout,
1446 };
1447 
1448 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1449 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1450 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1451 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1452 		  SDHCI_QUIRK_NO_HISPD_BIT |
1453 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1454 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1455 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1456 	.ops  = &tegra186_sdhci_ops,
1457 };
1458 
1459 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1460 	.pdata = &sdhci_tegra186_pdata,
1461 	.dma_mask = DMA_BIT_MASK(40),
1462 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1463 		    NVQUIRK_HAS_PADCALIB |
1464 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1465 		    NVQUIRK_ENABLE_SDR50 |
1466 		    NVQUIRK_ENABLE_SDR104 |
1467 		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1468 	.min_tap_delay = 84,
1469 	.max_tap_delay = 136,
1470 };
1471 
1472 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1473 	.pdata = &sdhci_tegra186_pdata,
1474 	.dma_mask = DMA_BIT_MASK(39),
1475 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1476 		    NVQUIRK_HAS_PADCALIB |
1477 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1478 		    NVQUIRK_ENABLE_SDR50 |
1479 		    NVQUIRK_ENABLE_SDR104,
1480 	.min_tap_delay = 96,
1481 	.max_tap_delay = 139,
1482 };
1483 
1484 static const struct of_device_id sdhci_tegra_dt_match[] = {
1485 	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1486 	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1487 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1488 	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1489 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1490 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1491 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1492 	{}
1493 };
1494 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1495 
1496 static int sdhci_tegra_add_host(struct sdhci_host *host)
1497 {
1498 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1499 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1500 	struct cqhci_host *cq_host;
1501 	bool dma64;
1502 	int ret;
1503 
1504 	if (!tegra_host->enable_hwcq)
1505 		return sdhci_add_host(host);
1506 
1507 	sdhci_enable_v4_mode(host);
1508 
1509 	ret = sdhci_setup_host(host);
1510 	if (ret)
1511 		return ret;
1512 
1513 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1514 
1515 	cq_host = devm_kzalloc(host->mmc->parent,
1516 				sizeof(*cq_host), GFP_KERNEL);
1517 	if (!cq_host) {
1518 		ret = -ENOMEM;
1519 		goto cleanup;
1520 	}
1521 
1522 	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1523 	cq_host->ops = &sdhci_tegra_cqhci_ops;
1524 
1525 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1526 	if (dma64)
1527 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1528 
1529 	ret = cqhci_init(cq_host, host->mmc, dma64);
1530 	if (ret)
1531 		goto cleanup;
1532 
1533 	ret = __sdhci_add_host(host);
1534 	if (ret)
1535 		goto cleanup;
1536 
1537 	return 0;
1538 
1539 cleanup:
1540 	sdhci_cleanup_host(host);
1541 	return ret;
1542 }
1543 
1544 static int sdhci_tegra_probe(struct platform_device *pdev)
1545 {
1546 	const struct of_device_id *match;
1547 	const struct sdhci_tegra_soc_data *soc_data;
1548 	struct sdhci_host *host;
1549 	struct sdhci_pltfm_host *pltfm_host;
1550 	struct sdhci_tegra *tegra_host;
1551 	struct clk *clk;
1552 	int rc;
1553 
1554 	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1555 	if (!match)
1556 		return -EINVAL;
1557 	soc_data = match->data;
1558 
1559 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1560 	if (IS_ERR(host))
1561 		return PTR_ERR(host);
1562 	pltfm_host = sdhci_priv(host);
1563 
1564 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1565 	tegra_host->ddr_signaling = false;
1566 	tegra_host->pad_calib_required = false;
1567 	tegra_host->pad_control_available = false;
1568 	tegra_host->soc_data = soc_data;
1569 
1570 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1571 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1572 		if (rc == 0)
1573 			host->mmc_host_ops.start_signal_voltage_switch =
1574 				sdhci_tegra_start_signal_voltage_switch;
1575 	}
1576 
1577 	/* Hook to periodically rerun pad calibration */
1578 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1579 		host->mmc_host_ops.request = tegra_sdhci_request;
1580 
1581 	host->mmc_host_ops.hs400_enhanced_strobe =
1582 			tegra_sdhci_hs400_enhanced_strobe;
1583 
1584 	if (!host->ops->platform_execute_tuning)
1585 		host->mmc_host_ops.execute_tuning =
1586 				tegra_sdhci_execute_hw_tuning;
1587 
1588 	rc = mmc_of_parse(host->mmc);
1589 	if (rc)
1590 		goto err_parse_dt;
1591 
1592 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1593 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1594 
1595 	/* HW busy detection is supported, but R1B responses are required. */
1596 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1597 
1598 	tegra_sdhci_parse_dt(host);
1599 
1600 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1601 							 GPIOD_OUT_HIGH);
1602 	if (IS_ERR(tegra_host->power_gpio)) {
1603 		rc = PTR_ERR(tegra_host->power_gpio);
1604 		goto err_power_req;
1605 	}
1606 
1607 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1608 	if (IS_ERR(clk)) {
1609 		rc = PTR_ERR(clk);
1610 
1611 		if (rc != -EPROBE_DEFER)
1612 			dev_err(&pdev->dev, "failed to get clock: %d\n", rc);
1613 
1614 		goto err_clk_get;
1615 	}
1616 	clk_prepare_enable(clk);
1617 	pltfm_host->clk = clk;
1618 
1619 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1620 							   "sdhci");
1621 	if (IS_ERR(tegra_host->rst)) {
1622 		rc = PTR_ERR(tegra_host->rst);
1623 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1624 		goto err_rst_get;
1625 	}
1626 
1627 	rc = reset_control_assert(tegra_host->rst);
1628 	if (rc)
1629 		goto err_rst_get;
1630 
1631 	usleep_range(2000, 4000);
1632 
1633 	rc = reset_control_deassert(tegra_host->rst);
1634 	if (rc)
1635 		goto err_rst_get;
1636 
1637 	usleep_range(2000, 4000);
1638 
1639 	rc = sdhci_tegra_add_host(host);
1640 	if (rc)
1641 		goto err_add_host;
1642 
1643 	return 0;
1644 
1645 err_add_host:
1646 	reset_control_assert(tegra_host->rst);
1647 err_rst_get:
1648 	clk_disable_unprepare(pltfm_host->clk);
1649 err_clk_get:
1650 err_power_req:
1651 err_parse_dt:
1652 	sdhci_pltfm_free(pdev);
1653 	return rc;
1654 }
1655 
1656 static int sdhci_tegra_remove(struct platform_device *pdev)
1657 {
1658 	struct sdhci_host *host = platform_get_drvdata(pdev);
1659 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1660 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1661 
1662 	sdhci_remove_host(host, 0);
1663 
1664 	reset_control_assert(tegra_host->rst);
1665 	usleep_range(2000, 4000);
1666 	clk_disable_unprepare(pltfm_host->clk);
1667 
1668 	sdhci_pltfm_free(pdev);
1669 
1670 	return 0;
1671 }
1672 
1673 #ifdef CONFIG_PM_SLEEP
1674 static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
1675 {
1676 	struct sdhci_host *host = dev_get_drvdata(dev);
1677 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1678 	int ret;
1679 
1680 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1681 		ret = cqhci_suspend(host->mmc);
1682 		if (ret)
1683 			return ret;
1684 	}
1685 
1686 	ret = sdhci_suspend_host(host);
1687 	if (ret) {
1688 		cqhci_resume(host->mmc);
1689 		return ret;
1690 	}
1691 
1692 	clk_disable_unprepare(pltfm_host->clk);
1693 	return 0;
1694 }
1695 
1696 static int __maybe_unused sdhci_tegra_resume(struct device *dev)
1697 {
1698 	struct sdhci_host *host = dev_get_drvdata(dev);
1699 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1700 	int ret;
1701 
1702 	ret = clk_prepare_enable(pltfm_host->clk);
1703 	if (ret)
1704 		return ret;
1705 
1706 	ret = sdhci_resume_host(host);
1707 	if (ret)
1708 		goto disable_clk;
1709 
1710 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1711 		ret = cqhci_resume(host->mmc);
1712 		if (ret)
1713 			goto suspend_host;
1714 	}
1715 
1716 	return 0;
1717 
1718 suspend_host:
1719 	sdhci_suspend_host(host);
1720 disable_clk:
1721 	clk_disable_unprepare(pltfm_host->clk);
1722 	return ret;
1723 }
1724 #endif
1725 
1726 static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
1727 			 sdhci_tegra_resume);
1728 
1729 static struct platform_driver sdhci_tegra_driver = {
1730 	.driver		= {
1731 		.name	= "sdhci-tegra",
1732 		.of_match_table = sdhci_tegra_dt_match,
1733 		.pm	= &sdhci_tegra_dev_pm_ops,
1734 	},
1735 	.probe		= sdhci_tegra_probe,
1736 	.remove		= sdhci_tegra_remove,
1737 };
1738 
1739 module_platform_driver(sdhci_tegra_driver);
1740 
1741 MODULE_DESCRIPTION("SDHCI driver for Tegra");
1742 MODULE_AUTHOR("Google, Inc.");
1743 MODULE_LICENSE("GPL v2");
1744